from typing import Any, Dict, List, Optional, Tuple
from autoPyTorch.pipeline.components.base_choice import autoPyTorchChoice
from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.TabularColumnTransformer import \
TabularColumnTransformer
from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.encoding import EncoderChoice
from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.imputation.SimpleImputer import SimpleImputer
from autoPyTorch.pipeline.components.preprocessing.tabular_preprocessing.scaling import ScalerChoice
from autoPyTorch.pipeline.tabular_classification import TabularClassificationPipeline
class TabularPipeline(TabularClassificationPipeline):
def _get_pipeline_steps(self, dataset_properties: Optional[Dict[str, Any]],
) -> List[Tuple[str, autoPyTorchChoice]]:
"""
Defines what steps a pipeline should follow.
The step itself has choices given via autoPyTorchChoice.
Returns:
List[Tuple[str, autoPyTorchChoice]]: list of steps sequentially exercised
by the pipeline.
"""
steps = [] # type: List[Tuple[str, autoPyTorchChoice]]
default_dataset_properties = {'target_type': 'tabular_classification'}
if dataset_properties is not None:
default_dataset_properties.update(dataset_properties)
steps.extend([
("imputer", SimpleImputer()),
("encoder", EncoderChoice(default_dataset_properties)),
("scaler", ScalerChoice(default_dataset_properties)),
("tabular_transformer", TabularColumnTransformer()),
])
return steps
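# A minimal usage sketch (hedged: it assumes this autoPyTorch version lets
# TabularClassificationPipeline be built with only dataset_properties, which
# may not hold; the step names simply mirror _get_pipeline_steps above):
# pipeline = TabularPipeline(dataset_properties={'target_type': 'tabular_classification'})
# steps = pipeline._get_pipeline_steps({'target_type': 'tabular_classification'})
# print([name for name, _ in steps])
# -> ['imputer', 'encoder', 'scaler', 'tabular_transformer']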
|
#!/usr/bin/python
"""This is the flask server of the a windows VM
It accepts the requests in the form of protobufs
and executes the same in the specified path
"""
import argparse
import logging
import os
from pathlib import Path
import shutil
import subprocess
import threading
import timeit
import sys
import requests
from waitress import serve
from flask import Flask, request
from google.cloud import storage
import repackage
repackage.up(2)
from vm_server.send.proto import request_pb2
sem = threading.Semaphore()
MASTER_SERVER = "http://127.0.0.1:5000"
task_status_response = request_pb2.TaskStatusResponse()
PORT = 8000
VM_ADDRESS = "127.0.0.1"
EXECUTE_DIR = "..\\execute"
EXECUTE_ACTION_DIR = "..\\execute\\action"
OUTPUT_DIR = "\\output\\"
BUCKET_NAME = "automation-interns"
DEBUG_FLAG = "DEBUG"
parser = argparse.ArgumentParser(description="VM Server")
parser.add_argument("debug_flag",
type=str,
help="""Usage: " + sys.argv[0] + DEBUG_FLAG + PORT"""
)
parser.add_argument("port",
type=int,
help="""Usage: " + sys.argv[0] + DEBUG_FLAG + PORT"""
)
arguments = parser.parse_args()
def get_processes(file_name):
"""Logs the current running processes in the file named file_name
Args:
file_name: Name of the file where the names of the processes are saved
"""
logging.debug("Getting the list of processes")
get_process = subprocess.Popen("powershell.exe \
Get-Process >{file}".format(file=file_name))
get_process.communicate()
def get_diff_processes():
"""Prints the difference in processes before and
after execution of a request
"""
logging.debug("Getting the diff of the processes")
compare_process = subprocess.Popen("powershell.exe Compare-Object \
(Get-Content process_before.txt)\
(Get-Content process_after.txt)")
compare_process.communicate()
def remove_execute_dir(task_request, task_response):
"""Deletes the execute directory if it exists
Args:
task_request: TaskRequest() object that is read from the protobuf
task_response: an object of TaskResponse() that will be sent back
"""
logging.debug("Removing execute directory")
dirpath = Path(EXECUTE_ACTION_DIR + "_" + str(task_request.request_id))
print("Dirpath is ", dirpath)
try:
if dirpath.exists() and dirpath.is_dir(): # delete leftover files
shutil.rmtree(dirpath)
except Exception as exception: # catch errors if any
logging.exception(str(exception))
logging.debug("Error deleting the execute directory")
task_response.status = request_pb2.TaskResponse.FAILURE
def make_directories(task_request, task_response):
"""Creates the directories for execution
Args:
task_request: TaskRequest() object that is read from the protobuf
task_response: an object of TaskResponse() that will be sent back
"""
logging.debug("Creating execute directory structure")
remove_execute_dir(task_request, task_response)
if task_response.status == request_pb2.TaskResponse.FAILURE:
return
current_path = EXECUTE_ACTION_DIR + "_" + str(task_request.request_id)
os.makedirs(EXECUTE_DIR, exist_ok=True)
os.mkdir(current_path)
os.mkdir(current_path + OUTPUT_DIR)
Path(EXECUTE_DIR + "\\__init__.py").touch() # __init__.py for package
Path(current_path + "\\__init__.py").touch()
try:
shutil.copytree(task_request.code_path, current_path + "\\code")
Path(current_path + "\\code\\__init__.py").touch() # __init__.py for package
data_path = Path(task_request.data_path)
if data_path.exists() is False:
os.mkdir(data_path)
shutil.copytree(task_request.data_path, current_path + "\\data")
except Exception as exception: # catch errors if any
logging.exception(str(exception))
logging.debug("Error copying code and data directories")
task_response.status = request_pb2.TaskResponse.FAILURE
def move_output(task_request, task_response):
"""Move the genrated output files to the output path specified
Args:
task_request: an object of TaskResponse() that is sent in the request
task_response: an object of TaskResponse() that will be sent back
"""
logging.debug("Moving the output path to the specified output path")
current_path = os.getcwd()
source_path = Path(current_path + "\\" + EXECUTE_ACTION_DIR + \
"_" + str(task_request.request_id) + OUTPUT_DIR)
if source_path.exists() is False:
os.mkdir(source_path)
destination_path = Path(current_path + "\\" + task_request.output_path)
if destination_path.exists() is False:
os.mkdir(destination_path)
files = os.listdir(source_path)
try:
for file in files:
shutil.move(os.path.join(source_path, file),
os.path.join(destination_path, file))
except Exception as exception: # catch errors if any
logging.exception(str(exception))
logging.debug("Error moving the output \
files to the specified output directory")
task_response.status = request_pb2.TaskResponse.FAILURE
def download_files_to_path(pantheon_path, destination_path, task_response):
"""Downloads files from pantheon path to the destination path
Args:
pantheon_path: the source path in pantheon
destination_path: the destination path where files are saved in the VM
task_response: an object of TaskResponse() that will be sent back
"""
bucket_name = BUCKET_NAME
storage_client = storage.Client()
blobs = storage_client.list_blobs(bucket_name, prefix=pantheon_path)
for blob in blobs:
source = Path(blob.name)
destination_file_path = Path(str(destination_path) \
+ "\\" + str(source.name))
        if blob.name.endswith('/'):
logging.debug("Making directory Destination path : %s", destination_path)
os.makedirs(destination_file_path, exist_ok=True)
else:
logging.debug("Downloading file Destination path : %s", destination_path)
os.makedirs(destination_path, exist_ok=True)
source = Path(blob.name)
logging.debug("Destination file path: %s", destination_file_path)
try:
blob.download_to_filename(destination_file_path)
except Exception as exception:
logging.debug("Error while downloading files, \
Exception: %s", str(exception))
task_response.status = request_pb2.TaskResponse.FAILURE
def download_input_files(task_request, task_response):
"""Downloads the input files from pantheon
Args:
task_request: TaskRequest() object that is read from the protobuf
task_response: an object of TaskResponse() that will be sent back
"""
remove_execute_dir(task_request, task_response)
current_path = EXECUTE_ACTION_DIR + "_" + str(task_request.request_id)
os.mkdir(current_path)
os.mkdir(current_path + OUTPUT_DIR)
Path(current_path + "\\__init__.py").touch()
download_files_to_path(task_request.code_path,
current_path + "\\code", task_response)
download_files_to_path(task_request.data_path,
current_path + "\\data", task_response)
def upload_output(task_request, task_response):
""" Upload the output files to pantheon
Args:
        task_request: TaskRequest() object that is sent in the request
task_response: an object of TaskResponse() that will be sent back
"""
bucket_name = BUCKET_NAME
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
source_path = Path(EXECUTE_ACTION_DIR + "_" + \
str(task_request.request_id) + OUTPUT_DIR)
destination_path = task_request.output_path
files = os.listdir(source_path)
try:
for file in files:
destination_blob_path = (destination_path + file)
blob = bucket.blob(destination_blob_path)
blob.upload_from_filename(str(source_path) + "/" + str(file))
except Exception as exception:
logging.debug("Error while uploading output files, \
Exception: %s", str(exception))
task_response.status = request_pb2.TaskResponse.FAILURE
def execute_action(task_request, task_response):
""" Execute the action
Args:
        task_request: TaskRequest() object that is sent in the request
task_response: an object of TaskResponse() that will be sent back
"""
if task_response.status == request_pb2.TaskResponse.FAILURE:
return
logging.debug("Trying to execute the action")
current_path = "..\\execute\\action" + "_" + str(task_request.request_id)
logging.debug("Action path is: %s",
str(current_path + task_request.target_path))
encoding = "utf-8"
out = None
err = None
    status = None
    execute = None
try:
execute = subprocess.Popen(["powershell.exe", # execute the target file
task_request.target_path],
stdout=subprocess.PIPE,
cwd=current_path)
out, err = execute.communicate(timeout=task_request.timeout)
except Exception as exception: # catch errors if any
logging.debug(str(exception))
logging.debug("FAILED TO EXECUTE THE ACTION")
task_response.status = request_pb2.TaskResponse.FAILURE
err = str(exception).encode(encoding)
    status = execute.returncode if execute is not None else None
if status:
logging.debug("Execution was unsuccessful")
task_response.status = request_pb2.TaskResponse.FAILURE
logging.debug("Process is running, force killing the process")
kill_process = subprocess.Popen("TASKKILL /F \
/PID {pid} /T".format(pid=execute.pid))
kill_process.communicate()
if out is None:
out = "".encode(encoding)
if err is None:
err = "".encode(encoding)
try:
        with open(current_path + OUTPUT_DIR + "stdout.txt", "w") as std_out:
            std_out.write(out.decode(encoding))
        with open(current_path + OUTPUT_DIR + "stderr.txt", "w") as std_err:
            std_err.write(err.decode(encoding))
output_files = [name for name in os.listdir(current_path + OUTPUT_DIR)
if os.path.isfile(current_path + OUTPUT_DIR + name)]
task_response.number_of_files = len(output_files)
if arguments.debug_flag == DEBUG_FLAG:
move_output(task_request, task_response)
else:
upload_output(task_request, task_response)
except Exception as exception:
logging.debug("Error writing in stdout, stderr %s", str(exception))
task_response.status = request_pb2.TaskResponse.FAILURE
def register_vm_address():
"""Send request to master server to inform that VM is free"""
data = "http://{}:{}".format(VM_ADDRESS, str(PORT))
try:
requests.get(MASTER_SERVER + str("/register"), data=data)
except Exception as exception:
logging.debug(str(exception))
logging.debug("Can't connect to the master server")
def task_completed(task_request, task_response):
"""Send response to the master server when the task has been executed
Args:
task_request: TaskRequest() object that is read from the protobuf
task_response: an object of TaskResponse() that will be sent back
"""
global task_status_response
if task_response.status != request_pb2.TaskResponse.FAILURE:
task_response.status = request_pb2.TaskResponse.SUCCESS
task_status_response.task_response.CopyFrom(task_response)
    current_path = os.path.dirname(os.path.realpath(__file__))
    response_proto = os.path.join(current_path, ".\\task_completed_response.pb")
    with open(response_proto, "wb") as status_response:
        status_response.write(task_status_response.SerializeToString())
remove_execute_dir(task_request, task_response)
logging.debug("Response Proto: %s", str(task_response))
# get_processes("process_after.txt")
# get_diff_processes()
with open(response_proto, "rb") as status_response:
try:
requests.post(url=MASTER_SERVER + "/success", files={"task_response": \
task_status_response.SerializeToString()})
except Exception as exception:
logging.debug(str(exception))
logging.debug("Can't connect to the master server")
status_response.close()
logging.debug("Releasing semaphore")
sem.release()
def set_environment_variables(task_request):
"""Set the environment variables in config pair
Args:
        task_request: TaskRequest() object that is sent in the request
"""
for config_pair in task_request.config_pairs:
os.environ[config_pair.key] = config_pair.value
def execute_wrapper(task_request, task_response):
"""Execute the tasks in the request
Args:
        task_request: TaskRequest() object that is sent in the request
task_response: an object of TaskResponse() that will be sent back
"""
global task_status_response
start = timeit.default_timer()
set_environment_variables(task_request)
if arguments.debug_flag == DEBUG_FLAG:
make_directories(task_request, task_response)
else:
download_input_files(task_request, task_response)
execute_action(task_request, task_response)
stop = timeit.default_timer()
time_taken = stop-start
logging.debug("Time taken is %s", str(time_taken))
task_response.time_taken = time_taken
task_status_response.status = request_pb2.TaskStatusResponse.COMPLETED
task_completed(task_request, task_response)
register_vm_address()
APP = Flask(__name__)
@APP.route("/get_status", methods=["POST"])
def get_status():
"""Endpoint for the master to know the status of the VM"""
global task_status_response
request_task_status_response = request_pb2.TaskStatusResponse()
request_task_status_response.ParseFromString(
request.files["task_request"].read()
)
response_task_status = task_status_response
if task_status_response.current_task_id != \
request_task_status_response.current_task_id:
response_task_status.status = request_pb2.TaskStatusResponse.INVALID_ID
return response_task_status.SerializeToString()
@APP.route("/assign_task", methods=["POST"])
def assign_task():
"""Endpoint to accept post requests with protobuffer"""
task_response = request_pb2.TaskResponse()
global task_status_response
# get_processes("process_before.txt")
if sem.acquire(blocking=False):
logging.debug("Accepted request: %s", str(request))
task_request = request_pb2.TaskRequest()
task_request.ParseFromString(request.files["task_request"].read())
logging.debug("Request Proto: %s", str(task_request))
thread = threading.Thread(target=execute_wrapper,
args=(task_request, task_response,))
thread.start()
task_status_response = request_pb2.TaskStatusResponse()
task_status_response.current_task_id = task_request.request_id
task_status_response.status = request_pb2.TaskStatusResponse.ACCEPTED
else:
task_status_response.status = request_pb2.TaskStatusResponse.REJECTED
# task_response.status = request_pb2.TaskResponse.BUSY
    current_path = os.path.dirname(os.path.realpath(__file__))
    response_proto = os.path.join(current_path, ".\\response.pb")
    logging.debug("Task Status Response: %s", str(task_status_response))
    with open(response_proto, "wb") as response:
        response.write(task_status_response.SerializeToString())
return task_status_response.SerializeToString()
@APP.route('/active', methods=['GET', 'POST'])
def is_active():
"""Master can check here if VM is active or not."""
return "VM Server is active"
@APP.route('/status', methods=['GET', 'POST'])
def flag_status():
"""Returns the state of VM"""
return "False"
if __name__ == "__main__":
logging.basicConfig(filename="server.log",
level=logging.DEBUG,
format="%(asctime)s:%(levelname)s: %(message)s")
logging.getLogger().addHandler(logging.StreamHandler())
# APP.run(debug=True)
    PORT = arguments.port
register_vm_address()
serve(APP, host="127.0.0.1", port=PORT)
|
# Tai Sakuma <[email protected]>
import ipywidgets as widgets
from IPython.display import display
from .base import Presentation
##__________________________________________________________________||
class ProgressBarJupyter(Presentation):
def __init__(self):
super().__init__()
self.interval = 0.05 # [second]
self.container_widget = None
self.active_box_list = [ ]
self.complete_box_list = [ ]
self.widget_dict = { } # {taskid: [box, bar, label]}
self._read_time()
def __repr__(self):
return '{}()'.format(
self.__class__.__name__
)
def _present(self):
self._create_widgets()
self._update_widgets()
def _create_widgets(self):
if self.container_widget is None:
self.container_widget = widgets.VBox()
display(self.container_widget)
for taskid in self._new_taskids:
report = self._report_dict[taskid]
self._create_widget(report)
def _create_widget(self, report):
bar = widgets.IntProgress(
value=report['done'], min=0, max=report['total'],
description='',
bar_style='', # 'success', 'info', 'warning', 'danger' or ''
orientation='horizontal'
)
label = widgets.HTML(value='')
box = widgets.HBox([bar, label])
self.active_box_list.append(box)
self.container_widget.children = self.complete_box_list + self.active_box_list
self.widget_dict[report['taskid']] = [box, bar, label]
def _update_widgets(self):
for taskid in self._finishing_taskids + self._active_taskids + self._new_taskids:
report = self._report_dict[taskid]
self._update_widget(report)
self._reorder_widgets(report)
if not self._new_taskids and not self._active_taskids:
self.container_widget = None
self.active_box_list[:] = [ ]
self.complete_box_list[:] = [ ]
self.widget_dict.clear()
def _update_widget(self, report):
percent = float(report['done'])/report['total'] if report['total'] > 0 else 1
percent = round(percent * 100, 2)
percent = '<pre>{:6.2f}%</pre>'.format(percent)
box = self.widget_dict[report['taskid']][0]
bar = self.widget_dict[report['taskid']][1]
bar.value = report['done']
bar.max = report['total']
bar.description = percent
if report['last']:
bar.bar_style = 'success'
label = self.widget_dict[report['taskid']][2]
name_field_length = 32
        percent = float(report['done'])/report['total'] if report['total'] > 0 else 1
        percent = round(percent * 100, 2)
        name = report['name'][0:name_field_length]
        if 'start_time' in report:
elapsed_str, remaining_str = self._get_time_track(report['start_time'], percent)
label.value = '<pre> | {:8d} / {:8d} ({:s} / {:s}) |: {:<{}s}</pre>'.format(report['done'], report['total'], elapsed_str, remaining_str, name, name_field_length)
else:
label.value = '<pre> | {:8d} / {:8d} |: {:<{}s}</pre>'.format(report['done'], report['total'], name, name_field_length)
def _reorder_widgets(self, report):
for taskid in self._finishing_taskids:
box, bar, label = self.widget_dict[taskid]
if box in self.active_box_list:
self.active_box_list.remove(box)
self.complete_box_list.append(box)
self.container_widget.children = self.complete_box_list + self.active_box_list
##__________________________________________________________________||
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
def execute():
frappe.reload_doc("accounts", "doctype", "account")
account_table_columns = frappe.db.get_table_columns("Account")
if "debit_or_credit" in account_table_columns and "is_pl_account" in account_table_columns:
frappe.db.sql("""UPDATE tabAccount
SET root_type = CASE
WHEN (debit_or_credit='Debit' and is_pl_account = 'No') THEN 'Asset'
WHEN (debit_or_credit='Credit' and is_pl_account = 'No') THEN 'Liability'
WHEN (debit_or_credit='Debit' and is_pl_account = 'Yes') THEN 'Expense'
WHEN (debit_or_credit='Credit' and is_pl_account = 'Yes') THEN 'Income'
END
WHERE ifnull(parent_account, '') = ''
""")
else:
for key, root_type in (("asset", "Asset"), ("liabilities", "Liability"), ("expense", "Expense"),
("income", "Income")):
frappe.db.sql("""update tabAccount set root_type=%s where name like %s
and ifnull(parent_account, '')=''""", (root_type, "%" + key + "%"))
for root in frappe.db.sql("""SELECT name, lft, rgt, root_type FROM `tabAccount`
WHERE ifnull(parent_account, '')=''""", as_dict=True):
if root.root_type:
frappe.db.sql("""UPDATE tabAccount SET root_type=%s WHERE lft>%s and rgt<%s""",
(root.root_type, root.lft, root.rgt))
else:
print(b"Root type not found for {0}".format(root.name.encode("utf-8")))
|
#!/usr/bin/python
import os.path
import cppcodebase
import random
def CreateLibMakefile(lib_number, classes):
os.chdir(cppcodebase.lib_name(lib_number))
handle = file("Makefile", "w");
handle.write ("""COMPILER = g++
INC = -I..
CCFLAGS = -Wall $(INC)
ARCHIVE = ar
DEPEND = makedepend
.SUFFIXES: .o .cpp
""")
handle.write ("lib = lib_" + str(lib_number) + ".a\n")
handle.write ("src = \\\n")
    for i in range(classes):
handle.write('class_' + str(i) + '.cpp \\\n')
handle.write ("""
objects = $(patsubst %.cpp, %.o, $(src))
all: depend $(lib)
$(lib): $(objects)
$(ARCHIVE) cr $@ $^
touch $@
.cpp.o:
$(COMPILER) $(CCFLAGS) -c $<
clean:
@rm $(objects) $(lib) 2> /dev/null
depend:
@$(DEPEND) $(INC) $(src)
""")
    handle.close()
    os.chdir('..')
def CreateFullMakefile(libs):
handle = file("Makefile", "w")
handle.write('subdirs = \\\n')
    for i in range(libs):
handle.write('lib_' + str(i) + '\\\n')
handle.write("""
all: $(subdirs)
@for i in $(subdirs); do \
$(MAKE) -C $$i all; done
clean:
@for i in $(subdirs); do \
(cd $$i; $(MAKE) clean); done
depend:
@for i in $(subdirs); do \
(cd $$i; $(MAKE) depend); done
""")
def CreateCodebase(libs, classes, internal_includes, external_includes):
cppcodebase.SetDir('make')
cppcodebase.CreateSetOfLibraries(libs, classes, internal_includes, external_includes, CreateLibMakefile)
CreateFullMakefile(libs)
os.chdir('..')
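# Example driver: generate a small synthetic codebase with the functions above.
# A sketch only; it assumes the sibling 'cppcodebase' helper module is importable
# and that the current directory is a scratch area the script may write into.
if __name__ == '__main__':
    CreateCodebase(libs=5, classes=10, internal_includes=2, external_includes=1)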
|
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
import numpy as np
def load_model(model_path):
reloaded_model = tf.keras.models.load_model(model_path, custom_objects={'KerasLayer':hub.KerasLayer})
return reloaded_model
def load_process_image(image_path, image_size):
# Open image from file and turn to numpy array
image = Image.open(image_path)
image = np.asarray(image)
    # Convert to tensor, resize, normalise
image = tf.convert_to_tensor(image, tf.float32)
image = tf.image.resize(image, (image_size, image_size))
image /= 255
# Convert back to numpy array and add an extra dimension (needed by the model)
image = image.numpy()
image = np.expand_dims(image, axis=0)
return image
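# Example usage of the two helpers above. A sketch: 'model.h5' and 'flower.jpg'
# are hypothetical paths, and 224 is a typical input size for hub image models.
if __name__ == '__main__':
    model = load_model('model.h5')
    image = load_process_image('flower.jpg', image_size=224)
    print(model.predict(image).shape)  # e.g. (1, num_classes)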
|
from py2neo import Node, Relationship, Graph, authenticate
from py2neo.ogm import GraphObject, Label, Property, RelatedFrom, RelatedTo
# from py2neo import Database
# basic Node, Relationship constructor
a = Node("Freelancer",
name="dono",
email="[email protected]",
noTelp="081081081081"
)
print(a)
b = Node("Freelancer",
name="kasino",
email="[email protected]",
noTelp="081081081081"
)
print(b)
c = Node("Freelancer",
name="indro",
email="[email protected]",
noTelp="081081081081"
)
print(c)
ab = Relationship(a,"Knows", b)
# print(ab)
# construct relationship with class
class StudyWith(Relationship): pass
ac = StudyWith(a, c)
# print(ac.type())
# Subgraph
s = ab | ac
print(s)
# print(s.nodes())
# print(s.relationships())
w = ab + Relationship(b, "in debt with", c) + ac
print(w)
print(" ")
print("-----------------")
print(" ")
print("-----------------")
# database connection
# graph_db = Graph()
authenticate("localhost:7474", "neo4j", "titiran7")
graph_db = Graph("bolt://localhost:7687")
print(graph_db)
# structure
class Role(GraphObject):
__primarykey__ = "role_name"
role_name = Property()
freelance = RelatedFrom('Freelancer','worked_in')
class Freelancer(GraphObject):
__primarykey__ = "name"
name = Property()
    email = Property()
    notelp = Property()
role = RelatedTo(Role, 'work_as')
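# A sketch of persisting one of the OGM classes above with the same py2neo
# v3-era API the script already uses (the property values are placeholders):
f = Freelancer()
f.name = "dono"
f.email = "[email protected]"
graph_db.push(f)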
|
#!/usr/bin/python
# encoding=utf-8
import sys
if sys.version_info[0] == 2:
# Python2
import core.AepSdkRequestSend as AepSdkRequestSend
else:
# Python3
from apis.core import AepSdkRequestSend
# Parameter productId: type long, must not be empty
#   Description:
def QueryProduct(appKey, appSecret, productId):
path = '/aep_product_management/product'
head = {}
param = {'productId':productId}
version = '20181031202055'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, None, key, 'GET')
if response is not None:
return response.read()
return None
# Parameter searchValue: type String, may be empty
#   Description: product id or product name
# Parameter pageNow: type long, may be empty
#   Description: current page number
# Parameter pageSize: type long, may be empty
#   Description: number of records per page
def QueryProductList(appKey, appSecret, searchValue, pageNow, pageSize):
path = '/aep_product_management/products'
head = {}
param = {'searchValue':searchValue, 'pageNow':pageNow, 'pageSize':pageSize}
version = '20190507004824'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, None, key, 'GET')
if response is not None:
return response.read()
return None
# Parameter MasterKey: type String, must not be empty
#   Description: the MasterKey can be found in the overview of the product the device belongs to
# Parameter productId: type long, must not be empty
#   Description:
def DeleteProduct(appKey, appSecret, MasterKey, productId):
path = '/aep_product_management/product'
head = {}
param = {'productId':productId}
version = '20181031202029'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, MasterKey, key, 'DELETE')
if response is not None:
return response.read()
return None
# Parameter body: type json, must not be empty
#   Description: body; see the platform API documentation for details
def CreateProduct(appKey, appSecret, body):
path = '/aep_product_management/product'
head = {}
param = {}
version = '20191018204154'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, None, key, 'POST')
if response is not None:
return response.read()
return None
# Parameter body: type json, must not be empty
#   Description: body; see the platform API documentation for details
def UpdateProduct(appKey, appSecret, body):
path = '/aep_product_management/product'
head = {}
param = {}
version = '20191018204806'
application = appKey
key = appSecret
response = AepSdkRequestSend.sendSDKRequest(path, head, param, body, version, application, None, key, 'PUT')
if response is not None:
return response.read()
return None
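# Usage sketch (kept commented because each call performs a signed HTTP request;
# the appKey/appSecret/productId values below are placeholders):
# print(QueryProduct('yourAppKey', 'yourAppSecret', 10086))
# print(QueryProductList('yourAppKey', 'yourAppSecret', 'myProduct', 1, 20))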
|
#!/usr/bin/env python
import sys
if sys.version < '2.6':
raise ImportError("Python versions older than 2.6 are not supported.")
import glob
import os.path
from setuptools import (setup, find_packages)
# set basic metadata
PACKAGENAME = 'JLAB_TESS'
DISTNAME = 'JLAB_TESS'
AUTHOR = 'Kevin Burdge'
AUTHOR_EMAIL = '[email protected]'
LICENSE = 'GPLv3'
# -- dependencies -------------------------------------------------------------
setup_requires = [
'setuptools',
]
install_requires = [
    'matplotlib',
    'numpy',
    'astropy',
    # 'time', 'pathlib' and 'multiprocessing' are standard-library modules
    # and should not be listed as pip dependencies
]
# -- run setup ----------------------------------------------------------------
packagenames = find_packages()
scripts = glob.glob(os.path.join('bin', '*'))
setup(name=DISTNAME,
provides=[PACKAGENAME],
version='0.0.1',
description=None,
long_description=None,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
packages=packagenames,
include_package_data=True,
scripts=scripts,
setup_requires=setup_requires,
install_requires=install_requires,
use_2to3=True,
classifiers=[
'Programming Language :: Python',
'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Intended Audience :: End Users/Desktop',
'Natural Language :: English',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
)
print(packagenames)
|
from firedrake import *
from firedrake.norms import errornorm
import os
## Class to solve an Elasticity problem of a bending rod.
#
# This class solves the Elasticity problem \f$-\nabla\cdot \sigma = f\f$ for a long
# rod under gravity load with
# \f$P_k\f$-finite elements in each component.
class ElasticRod:
    ## Constructor of ElasticRod class.
#
# Setup all necessary stuff to solve the problem.
#
# @param[in] nx integer grid parameter x-direction
# @param[in] ny integer grid parameter y-direction
# @param[in] nz integer grid parameter z-direction
# @param[in] lx length x-direction
# @param[in] ly length y-direction
# @param[in] lz length z-direction
def __init__(self, nx, ny, nz, lx=10, ly=1, lz=1):
self.setup_mesh(nx, ny, nz, lx, ly, lz)
self.setup_space()
self.setup_data()
self.setup_form()
self.setup_bc()
## Setup mesh for FEM computation.
#
    # Set up a box mesh with nx x ny x nz elements.
#
# @param[in] nx integer grid parameter x-direction
# @param[in] ny integer grid parameter y-direction
# @param[in] nz integer grid parameter z-direction
# @param[in] lx length x-direction
# @param[in] ly length y-direction
# @param[in] lz length z-direction
def setup_mesh(self, nx, ny, nz, lx, ly, lz):
self.mesh = BoxMesh(nx, ny, nz, lx, ly, lz)
    ## Setup FE spaces.
    #
    # Set up a conforming function space ``V`` for the problem.
    # We use simple Lagrange elements.
def setup_space(self):
self.V = VectorFunctionSpace(self.mesh, "CG", 1)
## Declare source function
#
# Declare ``f`` over the space V and initialise
# it with chosen right hand side function value.
def setup_data(self):
self.__rho = Constant(2710)
self.__g = Constant(9.81)
self.__mu = Constant(2.57e10)
self.__lambda = Constant(5.46e10)
self.f = as_vector([0, 0, -self.__rho * self.__g])
self.__Id = Identity(self.mesh.geometric_dimension()) # Identity tensor
## Strain tensor
def epsilon(self, u):
return 0.5*(grad(u) + grad(u).T)
## Hooke's law
def sigma(self, u):
return self.__lambda*div(u)*self.__Id + 2*self.__mu*self.epsilon(u)
## Form and function setup.
#
# Define test and trial functions on the subspace of the function
# space. Then define the variational forms.
def setup_form(self):
self.u = TrialFunction(self.V)
self.v = TestFunction(self.V)
self.a = inner(self.sigma(self.u), self.epsilon(self.v)) * dx
self.L = dot(self.f, self.v) * dx
## Setup boundary conditions.
#
    # The strongly enforced boundary condition clamps the rod at the x=0 face
    # (boundary marker 1).
def setup_bc(self):
# clamped at x=0
self.bc = DirichletBC(self.V, Constant([0, 0, 0]), 1)
## Call the solver.
#
# Then we solve the linear variational problem ``a == L``
    # and advise PETSc to use a conjugate gradient method.
def solve(self, options=None, **kwargs):
# create rigid body modes
x, y, z = SpatialCoordinate(self.mesh)
b0 = Function(self.V)
b1 = Function(self.V)
b2 = Function(self.V)
b3 = Function(self.V)
b4 = Function(self.V)
b5 = Function(self.V)
b0.interpolate(Constant([1, 0, 0]))
b1.interpolate(Constant([0, 1, 0]))
b2.interpolate(Constant([0, 0, 1]))
        b3.interpolate(as_vector([-y, x, 0]))
        b4.interpolate(as_vector([z, 0, -x]))
        b5.interpolate(as_vector([0, -z, y]))
nullmodes = VectorSpaceBasis([b0, b1, b2, b3, b4, b5])
# Make sure they're orthonormal.
nullmodes.orthonormalize()
self.uh = Function(self.V)
solve(self.a == self.L,
self.uh,
bcs=self.bc,
solver_parameters=options,
near_nullspace=nullmodes,
**kwargs)
## Write solution as *.vtk
#
# Lastly we write the component of the solution corresponding to the primal
# variable on the DG space to a file in VTK format for later inspection with a
# visualisation tool such as `ParaView <http://www.paraview.org/>`.
def write_solution(self):
output_dir_path = os.path.dirname(os.path.realpath(__file__))
File(output_dir_path + "/../data/elastic_rod_3d.pvd").write(self.uh)
if __name__ == '__main__':
problem = ElasticRod(100, 10, 10)
problem.solve(options={"ksp_type": "cg",
"ksp_max_it": 100,
"pc_type": "gamg",
"mat_type": "aij",
"ksp_monitor": None})
problem.write_solution()
print("...done.")
|
from selenium import webdriver
from facebook_credentials import username,password
from time import sleep
class TinderBot():
def __init__(self):
self.driver = webdriver.Chrome()
def login(self):
self.driver.get('https://tinder.com')
sleep(5)
fb_btn = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/div[2]/button')
fb_btn.click()
base_window = self.driver.window_handles[0]
        self.driver.switch_to_window(self.driver.window_handles[1])
email_in = self.driver.find_element_by_xpath('//*[@id="email"]')
email_in.send_keys(username)
pass_in = self.driver.find_element_by_xpath('//*[@id="pass"]')
pass_in.send_keys(password)
log_in = self.driver.find_element_by_xpath('//*[@id="u_0_0"]')
log_in.click()
self.driver.switch_to_window(base_window)
sleep(2)
popup1 = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]')
popup1.click()
sleep(2)
popup2 = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/button[1]')
popup2.click()
def like(self):
like_btn = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/button[3]')
like_btn.click()
def close_match(self):
keep_swipebtn = self.driver.find_element_by_xpath('//*[@id="modal-manager-canvas"]/div/div/div[1]/div/div[3]/a')
keep_swipebtn.click()
def close_popup(self):
        self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[2]/button[2]').click()
def auto_swipe(self):
while True:
sleep(0.5)
try:
self.like()
except Exception:
try:
self.close_match()
except Exception:
                    self.close_popup()
bot = TinderBot()
bot.login()
bot.auto_swipe()
|
from merkki import Sprite
class Liikkuja(Sprite):
def __init__(self, x, y, merkki, taso):
print("liikkuja init")
super(Liikkuja, self).__init__(x, y, merkki)
self.taso = taso
def liiku(self, suunta):
print("liikkuja liiku")
uusix = self.x + suunta[0]
uusiy = self.y + suunta[1]
if self.taso.kartta[uusix][uusiy].tyhja:
self.x = uusix
self.y = uusiy
return True
else:
            return False
|
# Save to HDF because cPickle fails with very large arrays
# https://github.com/numpy/numpy/issues/2396
import h5py
import numpy as np
import tempfile
import unittest
def dict_to_hdf(fname, d):
"""
Save a dict-of-dict datastructure where values are numpy arrays
to a .hdf5 file
"""
with h5py.File(fname, 'w') as f:
def _dict_to_group(root, d):
            for key, val in d.items():
if isinstance(val, dict):
grp = root.create_group(key)
_dict_to_group(grp, val)
else:
root.create_dataset(key, data=val)
_dict_to_group(f, d)
def hdf_to_dict(fname):
"""
Loads a dataset saved using dict_to_hdf
"""
with h5py.File(fname, 'r') as f:
def _load_to_dict(root):
d = {}
            for key, val in root.items():
if isinstance(val, h5py.Group):
d[key] = _load_to_dict(val)
else:
                    d[key] = val[()]
return d
return _load_to_dict(f)
def load(exp_name, ret_d=False, data_fname='data.hdf5'):
d = hdf_to_dict('../%s' % data_fname)
mosaic = d['mosaic']
id2label = d['id2label']
train_ij = d['experiments'][exp_name]['train_ij']
test_ij = d['experiments'][exp_name]['test_ij']
y_train = d['experiments'][exp_name]['y_train']
y_test = d['experiments'][exp_name]['y_test']
if ret_d:
return mosaic, id2label, train_ij, test_ij, y_train, y_test, d
else:
return mosaic, id2label, train_ij, test_ij, y_train, y_test
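# Usage sketch for load() (an assumption: '../data.hdf5' exists and contains an
# experiment group named 'exp1' laid out as accessed above):
# mosaic, id2label, train_ij, test_ij, y_train, y_test = load('exp1')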
# -- Unit tests
class HDFIOTest(unittest.TestCase):
def test_hdfio(self):
d = {
'a' : np.random.rand(5, 3),
'b' : {
'c' : np.random.randn(1, 2),
'd' : {
'e' : np.random.randn(10, 5),
'f' : np.random.randn(10, 5),
}
}
}
with tempfile.NamedTemporaryFile() as f:
dict_to_hdf(f.name, d)
d2 = hdf_to_dict(f.name)
            self.assertCountEqual(d, d2)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/hfo_env python3
# encoding=utf-8
import argparse
import os
import pickle
import numpy as np
from agents.plastic_dqn_v1.agent.replay_buffer import ExperienceBuffer, \
LearnBuffer
from agents.plastic_dqn_v1 import config
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--team_name', type=str, default=None)
parser.add_argument('--dir', type=str)
# Parse arguments:
args = parser.parse_args()
team_name = args.team_name
directory = args.dir
print(f"[PLASTIC Train: {team_name}] dir={directory};")
step = 0
    experience_file = config.EXPERIENCE_BUFFER_FORMAT.format(
        team_name=team_name, step=step)
data_file = os.path.join(directory, team_name, experience_file)
print("File ", data_file)
replay_buffer = list()
while os.path.isfile(data_file):
with open(data_file, "rb") as fp:
learn_buffer: LearnBuffer = pickle.load(fp)
data: list = learn_buffer.buffer
replay_buffer += data
print(f"Add stage {step} data. SIZE={len(data)}")
step += 1
experience_file = config.EXPERIENCE_BUFFER_FORMAT.format(
team_name=team_name, step=step)
data_file = os.path.join(directory, team_name, experience_file)
print("File ", data_file)
experience_buffer = ExperienceBuffer(np.array(replay_buffer))
experience_buffer.save_to_pickle(dir=directory, team_name=team_name)
print("\n!!!!!!!!! Train End !!!!!!!!!!!!\n\n") |
from python.code_challenges.linked_list.linked_list.linked_list import Linked_list
class Hashtable:
def __init__(self, size=1024, prime=564):
self.size = size
self.array = [None] * size
self.prime = prime
    def add(self, key, value):
        index = self.hash(key)
        if self.array[index] is None:
            self.array[index] = Linked_list()
        self.array[index].insert([key, value])
        return self.array[index]
    def get(self, key):
        index = self.hash(key)
        if self.array[index] is None:
            return None
        current_value = self.array[index].head
while current_value:
key_inside = current_value.value[0]
if key_inside == key:
return current_value.value[1]
current_value = current_value.next
def contains(self, key):
index = self.hash(key)
if self.array[index] is None:
return False
current_value = self.array[index].head
while current_value:
key_inside = current_value.value[0]
if key_inside == key:
return True
current_value = current_value.next
return False
def hash(self, key):
value = 0
for char in key:
value += ord(char)
index = (value * self.prime) % (self.size)
return index
ht = Hashtable()
ht.add("abd", 10)
ht.add("adb", 12)
ht.add("dba", 18)
ht.add("dab", 20)
|
from pathlib import Path
import pandas as pd
from matplotlib import pyplot as plt
from scipy import stats
from settings import PROJECT_ID
REPLICATION_DIR = Path('./ai2_replication')
BQ_EXPORT_PATH = REPLICATION_DIR / 'ai_papers_any_author.pkl.gz'
CITATION_COUNT_FIELD = "EstimatedCitation"
FIGURE_PATH = REPLICATION_DIR / 'output'
# [ai2]: Pull down table of AI papers from Redshift, and add on columns for the final US/China heuristics and the
# cutoffs for levels of how much a paper is cited.
if BQ_EXPORT_PATH.exists():
df = pd.read_pickle(BQ_EXPORT_PATH, compression='gzip')
else:
df = pd.read_gbq('select * from ai2_replication.ai_papers_any_author '
'where extract(year from CreatedDate) < 2019 '
' and extract(year from CreatedDate) > 1980',
project_id=PROJECT_ID)
df.to_pickle(BQ_EXPORT_PATH, compression='gzip')
# [jd] We already subset by year, above, but it doesn't seem effective
df = df.loc[(df['yr'] > 1980) & (df['yr'] < 2019)]
df["citation_count"] = df[CITATION_COUNT_FIELD].astype(int)
df["citation_count"].value_counts()
df['china'] = df.dotcn.astype(bool) | df.dothk.astype(bool) \
| df.china_name.astype(bool) | df.china_language.astype(bool) \
| df.china_city.astype(bool)
df['us'] = df.dotedu.astype(bool)
df['top_half_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x - x) + x.quantile(0.5))
df['top_tenth_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x - x) + x.quantile(0.9))
df['top_twentieth_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x - x) + x.quantile(0.95))
df['top_hundredth_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x - x) + x.quantile(0.99))
df['top_halfpercent_cutoff'] = df.groupby('yr').citation_count.transform(lambda x: (x - x) + x.quantile(0.995))
# JD: Write the final analysis table back to BQ
cutoffs = ['half', 'tenth', 'twentieth', 'hundredth', 'halfpercent']
bq_cols = ['PaperId', 'yr', 'china', 'us', 'citation_count'] + [f'top_{stub}' for stub in cutoffs]
# JD: nb these columns weren't in the original dataframe
for stub in ['half', 'tenth', 'twentieth', 'hundredth', 'halfpercent']:
df[f'top_{stub}'] = df['citation_count'] > df[f'top_{stub}_cutoff']
df[bq_cols].to_gbq('ai2_replication.analysis', project_id=PROJECT_ID, if_exists='replace')
# What's this (x - x) business above? (x - x) is an all-zero Series indexed like
# the group, so adding the scalar quantile broadcasts it to one value per row;
# the asserts below confirm it matches returning the scalar directly.
for col, q in zip(
['top_half_cutoff', 'top_tenth_cutoff', 'top_twentieth_cutoff', 'top_hundredth_cutoff',
'top_halfpercent_cutoff'],
[0.5, .9, .95, .99, .995]):
assert (df[col] == df.groupby('yr').citation_count.transform(lambda x: x.quantile(q))).all()
plt.close()
sums = df.groupby('yr').china.sum()
ax1 = sums.plot(label="# Papers", color='b')
ax1.set_xlabel('');
ax1.set_ylabel('# Papers')
ax2 = ax1.twinx()
df[df.citation_count > df.top_tenth_cutoff].groupby('yr').china.mean().plot(label='Top 10%', ax=ax2, color='g',
style='--')
df[df.citation_count <= df.top_half_cutoff].groupby('yr').china.mean().plot(label='Bottom Half', ax=ax2, color='r',
style='--')
ax2.set_xlabel('');
ax2.set_ylabel('Market Shares')
ax2.set_xlim([1980, 2018])
plt.title("China's Drop was in Bad Papers")
plt.minorticks_on()
plt.legend()
plt.savefig(FIGURE_PATH / 'chinas_drop_vs_market_share.png')
# Raw number of papers
plt.close()
ax1 = df.groupby('yr').china.sum().plot(label='China')
ax2 = df.groupby('yr').us.sum().plot(label='US')
plt.title('All AI Papers')
ax2.set_xlim([1980, 2018])
plt.legend();
plt.xlabel('');
plt.ylabel('# Papers')
plt.minorticks_on()
plt.savefig(FIGURE_PATH / 'all_papers.png')
# Market share for different levels of citation
cutoffcol_title_pairs = [
    ('top_half_cutoff', 'Share of Papers in the Top 50%'),
    ('top_twentieth_cutoff', 'Share of Papers in the Top 5%'),
    ('top_halfpercent_cutoff', 'Share of Papers in the Top 0.5%')
]
xlim = [1981, 2025]
ylim = [0.0, .75]
for cutoffcol, title in cutoffcol_title_pairs:
print(title)
# Create time series for each country
china_ts = df[df.citation_count > df[cutoffcol]].groupby('yr').china.mean()
us_ts = df[df.citation_count > df[cutoffcol]].groupby('yr').us.mean()
# fit lines to last 4 years
china_slope, china_intercept, r_value, p_value, std_err = stats.linregress([2015, 2016, 2017, 2018], china_ts[-4:])
us_slope, us_intercept, r_value, p_value, std_err = stats.linregress([2015, 2016, 2017, 2018], us_ts[-4:])
intercept_year = (china_intercept - us_intercept) / (us_slope - china_slope)
# Compute interpolations to plot
fit_years = pd.Series(range(2014, 2026), index=range(2014, 2026))
china_fit = fit_years * china_slope + china_intercept
us_fit = fit_years * us_slope + us_intercept
# Save a CSV
pd.DataFrame({'China': china_ts, 'US': us_ts,
'China Fit': china_fit, 'US Fit': us_fit}).to_csv(FIGURE_PATH / f'{title}.csv')
# Plot
plt.close()
ax1 = china_ts.plot(label='China')
ax2 = us_ts.plot(label='US')
ax1.set_xlim(xlim)
ax2.set_xlim(xlim)
# ax1.set_ylim(ylim)
# ax2.set_ylim(ylim)
china_fit = china_fit.plot(style='--', label='China Fit')
us_fit = us_fit.plot(style='--', label='US Fit')
china_fit.set_xlim(xlim)
# china_fit.set_ylim(ylim)
us_fit.set_xlim(xlim)
# us_fit.set_ylim(ylim)
# china_fit.set_ylim(ylim)
# us_fit.set_ylim(ylim)
plt.title(title + ' : Intercept in ' + str(int(intercept_year)))
plt.legend();
plt.xlabel('');
plt.ylabel('Market Share')
plt.minorticks_on()
plt.savefig(FIGURE_PATH / f'{title}.png')
|
#!/usr/bin/python3
import sqlite3
def studentData():
con = sqlite3.connect('student.db')
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS student(id INTEGER PRIMARY KEY, StdID text, FirstName text, LastName text, DoB text, Age text, Gender text, Adress text, Mobile text, ImgPath text)")
con.commit()
con.close()
def addStdRec(StdID, FirstName, LastName, DoB, Age, Gender, Adress, Mobile, ImgPath):
con = sqlite3.connect('student.db')
cur = con.cursor()
cur.execute("INSERT INTO student VALUES ( NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(StdID, FirstName, LastName, DoB, Age, Gender, Adress, Mobile, ImgPath))
con.commit()
con.close()
def viewData():
con = sqlite3.connect('student.db')
cur = con.cursor()
cur.execute("SELECT * FROM student")
rows = cur.fetchall()
con.close()
return rows
def deleteRec(id):
con = sqlite3.connect('student.db')
cur = con.cursor()
cur.execute("DELETE FROM student WHERE id = ?", (id,))
con.commit()
con.close()
def searchData(StdID="", FirstName="", LastName="", DoB="", Age="", Gender="", Adress="", Mobile="", ImgPath=""):
con = sqlite3.connect('student.db')
cur = con.cursor()
cur.execute("SELECT * FROM student WHERE StdID = ? OR FirstName = ? OR LastName = ? OR DoB = ? OR Age = ? OR Gender = ? OR Adress = ? OR Mobile= ? OR ImgPath= ?",
(StdID, FirstName, LastName, DoB, Age, Gender, Adress, Mobile, ImgPath))
rows = cur.fetchall()
con.close()
return rows
def dataUpdate(id, StdID="", FirstName="", LastName="", DoB="", Age="", Gender="", Adress="", Mobile="", ImgPath=""):
con = sqlite3.connect('student.db')
cur = con.cursor()
cur.execute("UPDATE student SET StdID = ? , FirstName = ? , LastName = ? ,DoB = ? , Age = ? , Gender = ? , Adress = ? , Mobile= ?, ImgPath= ?, WHERE id = ?",
(StdID, FirstName, LastName, DoB, Age, Gender, Adress, Mobile,ImgPath, id))
con.commit()
con.close()
studentData()
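# Smoke-test sketch (hypothetical record values):
if __name__ == '__main__':
    addStdRec('S001', 'Ada', 'Lovelace', '1815-12-10', '36', 'F', 'London',
              '0000000000', 'ada.png')
    print(viewData())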
|
import torch
import torch.nn.functional as F
from torch import nn
from torch import sigmoid, tanh, relu_
class LockedDropout(nn.Module):
    def __init__(self, dropout):
        super().__init__()
        self.dropout = dropout
def forward(self, x):
if not self.training or not self.dropout:
return x
m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout)
mask = m / (1 - self.dropout)
mask = mask.expand_as(x)
return mask * x
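# LockedDropout implements "variational" dropout for sequences: one Bernoulli
# mask of shape (1, batch, features) is drawn per forward pass and reused at
# every time step, instead of resampling per element. A quick sketch:
#   drop = LockedDropout(0.5)
#   drop.train()
#   x = torch.randn(10, 4, 8)   # (seq_len, batch, features)
#   y = drop(x)                 # all 10 time steps share the same mask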
class WeightDrop(nn.Module):
def __init__(self, module, weights, dropout=0.0, variational=False):
super(WeightDrop, self).__init__()
self.module = module
self.weights = weights
self.dropout = dropout
self.variational = variational
self._setup()
def widget_demagnetizer_y2k_edition(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function
# It must be a function rather than a lambda as otherwise pickling explodes
# We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!
# (โฏยฐโกยฐ๏ผโฏ๏ธต โปโโป
return
def _setup(self):
# Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
if issubclass(type(self.module), torch.nn.RNNBase):
self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition
for name_w in self.weights:
print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
w = getattr(self.module, name_w)
del self.module._parameters[name_w]
self.module.register_parameter(name_w + '_raw', nn.Parameter(w.data))
def set_weights(self):
for name_w in self.weights:
raw_w = getattr(self.module, name_w + '_raw')
if self.variational:
mask = torch.ones(raw_w.size(0), 1)
if raw_w.is_cuda: mask = mask.cuda()
mask = F.dropout(mask, p=self.dropout, training=True)
w = mask.expand_as(raw_w) * raw_w
else:
w = F.dropout(raw_w, p=self.dropout, training=self.training)
w = nn.Parameter(w)
setattr(self.module, name_w, w)
def forward(self, *args):
self.set_weights()
return self.module.forward(*args)
class NASCell(nn.Module):
def __init__(self, input_size, hidden_size, num_proj=None, use_biases=False):
super(NASCell, self).__init__()
self._num_units = hidden_size
self._num_proj = num_proj
self._use_biases = use_biases
self._input_size = input_size
num_proj = self._num_units if num_proj is None else num_proj
self.concat_w_m = nn.Parameter(torch.randn(num_proj, 8 * self._num_units))
self.concat_w_inputs = nn.Parameter(torch.randn(self._input_size, 8 * self._num_units))
if use_biases:
self.bias = nn.Parameter(torch.randn(8 * self._num_units))
if self._num_proj is not None:
self.concat_w_proj = nn.Parameter(torch.randn(self._num_units, 8 * self._num_proj))
def forward(self, input, state):
(m_prev, c_prev) = state
m_matrix = torch.mm(m_prev, self.concat_w_m)
input_matrix = torch.mm(input, self.concat_w_inputs)
if self._use_biases:
            m_matrix = torch.add(m_matrix, self.bias)
m_matrix_splits = torch.split(m_matrix, self._num_units, dim=1)
inputs_matrix_splits = torch.split(input_matrix, self._num_units, dim=1)
layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
layer1_1 = relu_(inputs_matrix_splits[1] + m_matrix_splits[1])
layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
layer1_3 = relu_(inputs_matrix_splits[3] * m_matrix_splits[3])
layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
l2_0 = tanh(layer1_0 * layer1_1)
l2_1 = tanh(layer1_2 + layer1_3)
l2_2 = tanh(layer1_4 * layer1_5)
l2_3 = sigmoid(layer1_6 + layer1_7)
l2_0 = tanh(l2_0 + c_prev)
l3_0_pre = l2_0 * l2_1
new_c = l3_0_pre
l3_0 = l3_0_pre
l3_1 = tanh(l2_2 + l2_3)
new_m = tanh(l3_0 * l3_1)
if self._num_proj is not None:
new_m = torch.mm(new_m, self.concat_w_proj)
return new_m, (new_m, new_c)
class R2N2_VAR(nn.Module):
    def __init__(self, num_inputs, rnn_config):
        super(R2N2_VAR, self).__init__()
        num_hidden, P, num_layer, dropout = rnn_config
        self.P = P
        self.encoder_rnn = nn.LSTM(num_inputs, num_hidden, 2, batch_first=True, bidirectional=True)
        self.wdec = WeightDrop(self.encoder_rnn, ['weight_hh_l0', 'weight_ih_l0'], dropout=dropout)
        self.A = nn.ModuleList([nn.Linear(num_inputs, num_inputs) for _ in range(self.P)])
        self.lock_drop = LockedDropout(dropout)
        self.output_layer = nn.Linear(num_hidden * 2, num_inputs)
def forward(self, y, last_state=None):
sum_wyb = torch.zeros_like(y[:, 0, :])
for idx, layer in enumerate(self.A):
sum_wyb += layer(y[:, idx, :])
encoded_y, hidden_state = self.wdec(y, last_state)
encoded_y = self.lock_drop(encoded_y)
outputs = self.output_layer(encoded_y[:, -1, :])
return outputs + sum_wyb
|
import ast
import sys
import os.path
from glob import glob
from collections import defaultdict
class Node(object):
"""Tree structure for managing python dependencies"""
@classmethod
def new_nonleaf(cls):
return Node(False, defaultdict(Node.new_nonleaf))
def __init__(self, isleaf, children_dict):
self.children = children_dict
self.isleaf = isleaf
def __key(self):
return (self.children, self.isleaf)
def __eq__(self, other):
return self.__key() == other.__key()
def __hash__(self):
return hash(self.__key())
def to_dict(self):
return {
            'children': { k: v.to_dict() for k, v in self.children.items() },
'isleaf': str(self.isleaf),
}
def leaves(self):
        for k, child in self.children.items():
if child.isleaf:
yield k
for l in child.leaves():
yield k + '.' + l
def add_path(self, path): # absolute path.in.this.form
if len(path) == 0: return
components = path.split('.')
node = self
for component in components:
node = node.children[component]
node.isleaf = True # set leaf
def remove_path_and_children(self, path):
if len(path) == 0: return
components = path.split('.')
prev = None
node = self
for component in components:
prev = node
node = node.children[component]
if components[-1] in prev.children:
del prev.children[components[-1]]
def contains_prefix_of(self, path):
components = path.split('.')
prev = None
node = self
for component in components:
if node.isleaf: return True
if component in node.children:
prev = node
node = node.children[component]
else:
break
return node.isleaf
def print_tree(self):
self.print_tree_prefix('')
def print_tree_prefix(self, prefix):
        for k, v in self.children.items():
            if v.isleaf is True:
                print('%s%s [leaf]' % (prefix, k))
            else:
                print('%s%s' % (prefix, k))
            v.print_tree_prefix(' ' + prefix)
class DepVisitor(ast.NodeVisitor):
def __init__(self):
self.imports = set([])
def visit_Import(self, node):
for alias in node.names:
self.imports.add(alias.name)
def visit_ImportFrom(self, node):
if node.level > 0:
return # ignore relative imports
for alias in node.names:
if alias.name == '*':
if node.module is not None:
self.imports.add(node.module)
else:
if node.module is not None:
impt = '%s.%s' % (node.module, alias.name)
self.imports.add(impt)
def __modules_with_root_module_path(path):
"""
Returns all modules beneath the root module path. This treats all
directories as packages regardless of whether or not they include
a __init__.py.
"""
modules = []
if os.path.isfile(path) and os.path.splitext(path)[1] == '.py' and os.path.basename(path) != '__init__.py':
name = os.path.splitext(os.path.basename(path))[0]
modules.append(name)
elif os.path.isdir(path):
pkg_name = os.path.basename(path)
modules.append(pkg_name)
for ff in os.listdir(path):
modules.extend(['.'.join([pkg_name, m]) for m in __modules_with_root_module_path(os.path.join(path, ff))])
return modules
def paths_to_root_modules(rootpath, ignore_paths=[], followlinks=True):
"""
Returns list of all paths to top-level (root) modules beneath
rootpath. Optional arguments: follow symbolic links, list of
directory paths to ignore (won't return any modules at or under
this path).
"""
if any([os.path.normpath(rootpath).startswith(os.path.normpath(ignore_path))
for ignore_path in ignore_paths]):
return []
if os.path.isfile(rootpath) and os.path.splitext(rootpath)[1] == '.py':
if rootpath.endswith('/setup.py') or rootpath == 'setup.py':
return []
else:
return [rootpath]
if os.path.exists(os.path.join(rootpath, '__init__.py')):
return [rootpath]
if os.path.isfile(rootpath) or (os.path.islink(rootpath) and not followlinks):
return []
module_paths = []
for ff in os.listdir(rootpath):
subpath = os.path.join(rootpath, ff)
module_paths.extend(paths_to_root_modules(subpath, ignore_paths, followlinks))
return module_paths
def modules_defined_in(path, ignore_paths=[], followlinks=True):
rootpaths = paths_to_root_modules(path, ignore_paths, followlinks)
modules = []
for r in rootpaths:
modules.extend(__modules_with_root_module_path(r))
return modules
def root_modules_defined_in(path, ignore_paths=[], followlinks=True):
"""
Paths passed as arguments should be absolute paths (there is no
input checking).
"""
rootpaths = paths_to_root_modules(path, ignore_paths, followlinks)
rootmodules = []
for r in rootpaths:
rootmodules.append(os.path.splitext(os.path.basename(r))[0])
return rootmodules
def import_tree_for_project(projectroot, **kwargs):
"""
Provides tree of imports for the project. By default, ignores
stdlib modules and internal modules. Also ignores explicit
relative import paths (even when ignore_internal is False) and
does not handle implicit relative import paths (it treats these as
absolute paths).
"""
ignore_stdlib = kwargs.get('ignore_stdlib', True)
ignore_internal = kwargs.get('ignore_internal', True)
ignore_tree = Node.new_nonleaf()
if ignore_internal:
for m in modules_defined_in(projectroot):
ignore_tree.add_path(m)
if ignore_stdlib:
for m in stdlib_root_modules():
ignore_tree.add_path(m)
import_tree = Node.new_nonleaf()
root_module_paths = paths_to_root_modules(projectroot)
for root_module_path in root_module_paths:
if os.path.isdir(root_module_path):
pyfiles = py_files_in_dir(root_module_path)
for pyfile in pyfiles:
add_imports_for_file_to_tree(root_module_path, pyfile, import_tree, ignore_tree)
else:
add_imports_for_file_to_tree(root_module_path, root_module_path, import_tree, ignore_tree)
return import_tree
def add_imports_for_file_to_tree(root_module_path, filename, import_tree, ignore_tree):
"""
root_module_path is either a *.py file or a directory containing __init__.py
"""
with open(filename) as ff:
try:
root = ast.parse(ff.read())
except:
sys.stderr.write('Could not parse file %s\n' % filename)
return
visitor = DepVisitor()
visitor.visit(root)
for impt in visitor.imports:
if len(impt) == 0: continue # empty import
if ignore_tree.contains_prefix_of(impt): continue # absolute path in ignore_tree
# TODO(bliu): ignore implicit relative imports
import_tree.add_path(impt)
def py_files_in_dir(rootdir, followlinks=True):
for root, dirs, files in os.walk(rootdir, followlinks=followlinks):
for ff in files:
if os.path.splitext(ff)[1] == '.py':
yield os.path.join(root, ff)
def stdlib_root_modules():
"""
    Finds stdlib python packages (packages that shouldn't be
    downloaded via pip).
"""
stdlib_dir, sitepkg_dir, global_sitepkg_dir = python_stdlib_dirs()
# python modules
py_modules = root_modules_defined_in(stdlib_dir, [global_sitepkg_dir, sitepkg_dir])
# c modules
dynload_dir = os.path.join(stdlib_dir, 'lib-dynload/*')
so_modules = [os.path.splitext(os.path.basename(path))[0] for path in glob(dynload_dir)]
return set(so_modules) | set(py_modules) | set(sys.builtin_module_names)
def python_stdlib_dirs():
"""
Returns (<stdlib-dir>, <sitepkg-dir>, <global-sitepkg-dir>). This
exists because sysconfig.get_python_lib(standard_lib=True) returns
something surprising when running a virtualenv python (the path to
the global python standard lib directory, rather than the
virtualenv standard lib directory), whereas
sysconfig.get_python_lib(standard_lib=False) returns a path to the
local site-packages directory. When processing the standard lib
directory (global), we should ignore the global site-packages
directory, not just the local one (which wouldn't get processed
anyway).
"""
import distutils.sysconfig as sysconfig
sitepkg_dir = sysconfig.get_python_lib(standard_lib=False)
stdlib_dir = sysconfig.get_python_lib(standard_lib=True)
return (stdlib_dir, sitepkg_dir, os.path.join(stdlib_dir, 'site-packages'))
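# Usage sketch (illustrative addition, not part of the original module):
# builds the import tree for a project directory and lists the root modules
# it defines. Assumes the functions above plus their `Node`/`DepVisitor`
# helpers live in this module; paths must be absolute, per the docstrings.
if __name__ == '__main__':
    _project = os.path.abspath(sys.argv[1]) if len(sys.argv) > 1 else os.getcwd()
    print('root modules:', root_modules_defined_in(_project))
    _tree = import_tree_for_project(_project)
    print('external imports collected:', _tree)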
|
import logging
from threading import Event
from __config__ import logging_config as config
from ..global_objects import setup_all
from ..monitors import monitorfactory # do not delete!!
logging.basicConfig(
level=config.level, format=config.loggin_format, filename=config.filename
)
start_event = Event()
__all__ = ["run", "start_event"]
def setup_global_objects():
setup_all()
def run():
setup_global_objects()
start_event.set()
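# Usage sketch (illustrative addition): another thread can block on
# `start_event` until run() has finished setting up the global objects.
#
#     from threading import Thread
#     Thread(target=run).start()
#     start_event.wait()  # returns once setup_all() has completed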
|
import neural_network_lyapunov.lyapunov as lyapunov
import neural_network_lyapunov.hybrid_linear_system as hybrid_linear_system
import neural_network_lyapunov.utils as utils
import neural_network_lyapunov.relu_system as relu_system
import unittest
import numpy as np
import torch
import gurobipy
class TestLyapunovDiscreteTimeHybridSystemROA(unittest.TestCase):
"""
This tests computing the region of attraction given the Lyapunov function
and the verified region.
"""
def setUp(self):
# Define three dynamical systems
# System 1 has all the states contracting, so if x[n] is within the
# box x_lo <= x[n] <= x_up, then x[n+1] is guaranteed to be within the
# box.
# System 2 has all the states expanding, so if x[n] is outside of the
# box, x[n+1] is guaranteed to be outside of the box.
# System 3 has some states contracting, and some states expanding.
self.dtype = torch.float64
self.system1 = hybrid_linear_system.AutonomousHybridLinearSystem(
2, self.dtype)
self.system2 = hybrid_linear_system.AutonomousHybridLinearSystem(
2, self.dtype)
self.system3 = hybrid_linear_system.AutonomousHybridLinearSystem(
2, self.dtype)
self.x_equilibrium = torch.zeros((2, ), dtype=self.dtype)
def _add_mode1(system, A):
system.add_mode(
A, torch.zeros((2, ), dtype=self.dtype),
torch.cat((torch.eye(
2, dtype=self.dtype), -torch.eye(2, dtype=self.dtype)),
dim=0), torch.tensor([1, 1, 0, 1], dtype=self.dtype))
def _add_mode2(system, A):
system.add_mode(
A, torch.zeros((2, ), dtype=self.dtype),
torch.cat((torch.eye(
2, dtype=self.dtype), -torch.eye(2, dtype=self.dtype)),
dim=0), torch.tensor([0, 1, 1, 1], dtype=self.dtype))
_add_mode1(self.system1,
torch.tensor([[0.5, 0], [0, 0.2]], dtype=self.dtype))
_add_mode2(self.system1,
torch.tensor([[0.2, 0], [0, 0.5]], dtype=self.dtype))
_add_mode1(self.system2,
torch.tensor([[1.5, 0], [0, 1.2]], dtype=self.dtype))
_add_mode2(self.system2,
torch.tensor([[1.2, 0], [0, 1.5]], dtype=self.dtype))
_add_mode1(self.system3,
torch.tensor([[1.5, 0], [0, 1.2]], dtype=self.dtype))
_add_mode2(self.system3,
torch.tensor([[0.2, 0], [0, 0.5]], dtype=self.dtype))
self.lyap_relu = utils.setup_relu((2, 4, 1),
params=None,
bias=True,
negative_slope=0.1,
dtype=self.dtype)
self.lyap_relu[0].weight.data = torch.tensor(
[[1.5, 0.3], [0.2, -0.4], [1.2, -0.4], [0.7, 0.1]],
dtype=self.dtype)
self.lyap_relu[0].bias.data = torch.tensor([0.4, -0.3, 1.1, 0.5],
dtype=self.dtype)
self.lyap_relu[2].weight.data = torch.tensor([[1., 0.5, -0.3, 1.2]],
dtype=self.dtype)
self.lyap_relu[2].bias.data = torch.tensor([0.3], dtype=self.dtype)
def construct_milp_roa_tester(self, dut, x_curr_in_box, is_milp_feasible):
V_lambda = 0.5
R = torch.tensor([[1, 3], [0.5, -1]], dtype=self.dtype)
x_lo_larger = np.array([-10, -10.])
x_up_larger = np.array([10., 10.])
# x_curr inside the box, and x_next outside the box.
milp, x_curr, x_next, t_slack, box_zeta = dut._construct_milp_for_roa(
V_lambda, R, self.x_equilibrium, x_lo_larger, x_up_larger,
x_curr_in_box)
milp.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
milp.gurobi_model.setParam(gurobipy.GRB.Param.DualReductions, False)
milp.gurobi_model.optimize()
if is_milp_feasible:
self.assertEqual(milp.gurobi_model.status,
gurobipy.GRB.Status.OPTIMAL)
x_curr_val = np.array([v.x for v in x_curr])
x_next_val = np.array([v.x for v in x_next])
self.assertAlmostEqual(
dut.lyapunov_value(torch.from_numpy(x_curr_val),
self.x_equilibrium,
V_lambda,
R=R).item(), milp.gurobi_model.ObjVal)
if x_curr_in_box:
in_box_x = x_curr_val
out_box_x = x_next_val
else:
in_box_x = x_next_val
out_box_x = x_curr_val
np.testing.assert_array_less(in_box_x, dut.system.x_up_all + 1E-7)
np.testing.assert_array_less(dut.system.x_lo_all - 1E-7, in_box_x)
self.assertFalse(
np.all(out_box_x <= dut.system.x_up_all - 1E-7)
and np.all(out_box_x >= dut.system.x_lo_all + 1E-7))
for i in range(len(t_slack)):
if box_zeta[i].x > 1 - 1E-7:
np.testing.assert_allclose(
np.array([v.x for v in t_slack[i]]), out_box_x)
else:
np.testing.assert_allclose(
np.array([v.x for v in t_slack[i]]),
np.zeros((dut.system.x_dim, )))
else:
self.assertEqual(milp.gurobi_model.status,
gurobipy.GRB.Status.INFEASIBLE)
def test_construct_milp_for_roa1(self):
dut = lyapunov.LyapunovDiscreteTimeHybridSystem(
self.system1, self.lyap_relu)
# Since system1 has contracting states, it is impossible to have
# x_curr inside the box while x_next outside the box.
self.construct_milp_roa_tester(dut,
x_curr_in_box=True,
is_milp_feasible=False)
# x_curr outside of the box, and x_next inside the box.
self.construct_milp_roa_tester(dut,
x_curr_in_box=False,
is_milp_feasible=True)
def test_construct_milp_for_roa2(self):
dut = lyapunov.LyapunovDiscreteTimeHybridSystem(
self.system2, self.lyap_relu)
# x_curr inside the box, x_next outside the box
self.construct_milp_roa_tester(dut,
x_curr_in_box=True,
is_milp_feasible=True)
# Since system2 has expanding states, it is impossible to have x_curr
# outside of the box, and x_next inside the box.
self.construct_milp_roa_tester(dut,
x_curr_in_box=False,
is_milp_feasible=False)
def test_construct_milp_for_roa3(self):
dut = lyapunov.LyapunovDiscreteTimeHybridSystem(
self.system3, self.lyap_relu)
# x_curr inside the box, x_next outside the box.
self.construct_milp_roa_tester(dut,
x_curr_in_box=True,
is_milp_feasible=True)
# x_curr outside the box, x_next inside the box.
self.construct_milp_roa_tester(dut,
x_curr_in_box=False,
is_milp_feasible=True)
def test_compute_region_of_attraction1(self):
dut = lyapunov.LyapunovDiscreteTimeHybridSystem(
self.system3, self.lyap_relu)
V_lambda = 0.5
R = torch.tensor([[1., 2.], [0.5, -1.]], dtype=self.dtype)
x_lo_larger = torch.tensor([-5, -5], dtype=self.dtype)
x_up_larger = torch.tensor([5, 5], dtype=self.dtype)
rho = dut.compute_region_of_attraction(V_lambda, R, self.x_equilibrium,
None, x_lo_larger, x_up_larger)
milp2, _, _, _, _ = dut._construct_milp_for_roa(
V_lambda, R, self.x_equilibrium, x_lo_larger, x_up_larger, False)
milp2.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
milp2.gurobi_model.optimize()
self.assertEqual(rho, milp2.gurobi_model.ObjVal)
def test_compute_region_of_attraction3(self):
dut = lyapunov.LyapunovDiscreteTimeHybridSystem(
self.system3, self.lyap_relu)
V_lambda = 0.5
R = torch.tensor([[1., 2.], [0.5, -1.]], dtype=self.dtype)
x_lo_larger = torch.tensor([-5, -5], dtype=self.dtype)
x_up_larger = torch.tensor([5, 5], dtype=self.dtype)
rho = dut.compute_region_of_attraction(V_lambda, R, self.x_equilibrium,
None, x_lo_larger, x_up_larger)
milp1, _, _, _, _ = dut._construct_milp_for_roa(
V_lambda, R, self.x_equilibrium, x_lo_larger, x_up_larger, True)
milp1.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
milp1.gurobi_model.optimize()
milp2, _, _, _, _ = dut._construct_milp_for_roa(
V_lambda, R, self.x_equilibrium, x_lo_larger, x_up_larger, False)
milp2.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
milp2.gurobi_model.optimize()
self.assertEqual(
rho, np.min([milp1.gurobi_model.ObjVal,
milp2.gurobi_model.ObjVal]))
class TestLyapunovHybridSystemROABoundary(unittest.TestCase):
def setUp(self):
self.dtype = torch.float64
torch.manual_seed(0)
lyapunov_relu = utils.setup_relu((2, 5, 6, 3, 1),
params=None,
negative_slope=0.1,
bias=True,
dtype=self.dtype)
forward_relu = utils.setup_relu((2, 4, 2),
params=None,
negative_slope=0.1,
bias=True,
dtype=self.dtype)
self.x_lo = torch.tensor([-2, -3], dtype=self.dtype)
self.x_up = torch.tensor([3, 5], dtype=self.dtype)
forward_system = relu_system.AutonomousReLUSystem(
self.dtype, self.x_lo, self.x_up, forward_relu)
self.dut = lyapunov.LyapunovDiscreteTimeHybridSystem(
forward_system, lyapunov_relu)
def construct_milp_for_roa_boundary(self, V_lambda, R, x_equilibrium):
milp, x = self.dut._construct_milp_for_roa_boundary(
V_lambda, R, x_equilibrium)
milp.gurobi_model.setParam(gurobipy.GRB.Param.OutputFlag, False)
milp.gurobi_model.optimize()
self.assertEqual(milp.gurobi_model.status, gurobipy.GRB.Status.OPTIMAL)
x_sol = torch.tensor([v.x for v in x], dtype=self.dtype)
# Check if the optimal solution is on the boundary.
self.assertTrue(
torch.logical_or(torch.any(torch.abs(x_sol - self.x_lo) < 1E-6),
torch.any(torch.abs(x_sol - self.x_up) < 1E-6)))
# Check if rho is computed correctly.
rho = milp.gurobi_model.ObjVal
self.assertAlmostEqual(self.dut.lyapunov_value(x_sol,
x_equilibrium,
V_lambda,
R=R).item(),
rho,
places=6)
# Now sample many states on the boundary, make sure V evaluated at
# these states are all above rho.
x_samples1 = utils.uniform_sample_in_box(
torch.tensor([self.x_lo[0], self.x_lo[1]], dtype=self.dtype),
torch.tensor([self.x_lo[0], self.x_up[1]], dtype=self.dtype), 1000)
x_samples2 = utils.uniform_sample_in_box(
torch.tensor([self.x_up[0], self.x_lo[1]], dtype=self.dtype),
torch.tensor([self.x_up[0], self.x_up[1]], dtype=self.dtype), 1000)
x_samples3 = utils.uniform_sample_in_box(
torch.tensor([self.x_lo[0], self.x_lo[1]], dtype=self.dtype),
torch.tensor([self.x_up[0], self.x_lo[1]], dtype=self.dtype), 1000)
x_samples4 = utils.uniform_sample_in_box(
torch.tensor([self.x_lo[0], self.x_up[1]], dtype=self.dtype),
torch.tensor([self.x_up[0], self.x_up[1]], dtype=self.dtype), 1000)
x_samples = torch.cat((x_samples1, x_samples2, x_samples3, x_samples4),
dim=0)
with torch.no_grad():
V_samples = self.dut.lyapunov_value(x_samples,
x_equilibrium,
V_lambda,
R=R)
np.testing.assert_array_less(rho - 1E-6, V_samples.detach().numpy())
def test_lyapunov_relu1(self):
self.dut.lyapunov_relu[0].weight.data = torch.tensor(
[[2, 4], [-1, 2], [0, 5], [-1, -3], [2, 4]], dtype=self.dtype)
self.construct_milp_for_roa_boundary(V_lambda=0.5,
R=torch.eye(2, dtype=self.dtype),
x_equilibrium=torch.tensor(
[0, 0], dtype=self.dtype))
def test_lyapunov_relu2(self):
self.dut.lyapunov_relu[0].weight.data = torch.tensor(
[[3, -4], [-4, 1], [0, 4], [-2, -3], [2, 4]], dtype=self.dtype)
self.construct_milp_for_roa_boundary(
V_lambda=0.5,
R=torch.tensor([[0, 1], [-1, 3], [2, 0]], dtype=self.dtype),
x_equilibrium=torch.tensor([1, 2], dtype=self.dtype))
if __name__ == "__main__":
unittest.main()
|
from setuptools import setup, find_packages
VERSION = '0.0.1'
DESCRIPTION = 'drawable image creator'
LONG_DESCRIPTION = 'prepare images for use in android studio projects'
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="drimg",
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author="hojjat-faryabi",
author_email="[email protected]",
url="https://github.com/hojjat-faryabi/drawable_image",
license='MIT',
packages=find_packages(),
install_requires=['Pillow', 'click'],
entry_points={
'console_scripts': [
'drimg = drimg.__main__:main'
]
},
keywords=['image', 'drawable', 'android', 'android-studio', 'drimg'],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
'License :: OSI Approved :: MIT License',
"Programming Language :: Python :: 3",
]
)
|
import time
import board
import busio
from adafruit_as726x import Adafruit_AS726x
# maximum value for a sensor reading
max_val = 16000
# maximum number of characters in each graph
max_graph = 80
def graph_map(x):
return min(int(x * max_graph / max_val), max_graph)
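# Worked example (illustrative): with max_val = 16000 and max_graph = 80,
# a reading of 8000 maps to int(8000 * 80 / 16000) = 40 bar characters,
# and any reading above 16000 is clamped to the full 80-character bar.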
# Initialize I2C bus and sensor.
i2c = busio.I2C(board.SCL, board.SDA)
sensor = Adafruit_AS726x(i2c)
sensor.conversion_mode = sensor.MODE_2
while True:
# Wait for data to be ready
while not sensor.data_ready:
time.sleep(.1)
    # plot the data
print("\n")
print("V: " + graph_map(sensor.violet)*'=')
print("B: " + graph_map(sensor.blue)*'=')
print("G: " + graph_map(sensor.green)*'=')
print("Y: " + graph_map(sensor.yellow)*'=')
print("O: " + graph_map(sensor.orange)*'=')
print("R: " + graph_map(sensor.red)*'=')
time.sleep(1)
|
import time
class FPSCounter:
def __init__(self):
self.fps = 0.0
self._last_timestamp = self._millis()
@staticmethod
def _millis():
return time.time() * 1000.0
def reset(self):
self._last_timestamp = self._millis()
    def update(self):
        ts = self._millis()
        delta = ts - self._last_timestamp
        if delta > 0:  # guard against a zero-interval call
            self.fps = 1000.0 / delta
        self._last_timestamp = ts
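# Usage sketch (illustrative addition): call update() once per frame and
# read the `fps` attribute afterwards.
if __name__ == '__main__':
    counter = FPSCounter()
    for _ in range(5):
        time.sleep(0.02)  # stand-in for rendering one frame (~50 FPS)
        counter.update()
    print('approx FPS: %.1f' % counter.fps)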
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2020 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# #*** <License> ************************************************************#
# This module is part of the package CAL.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# CAL.G8R
#
# Purpose
# Reverse localization, i.e., globalization, for calendary names
#
# Revision Dates
# 10-Feb-2016 (CT) Creation
# 12-Feb-2016 (CT) Factor `Week_Day_Abbrs`
# 15-Feb-2016 (CT) Add `yearday`, `nlyearday`, `leapdays`
# 15-Feb-2016 (CT) Add test for `localized`
# 15-Feb-2016 (CT) Use `G8R_Multi`, not `Multi_Re_Replacer`
# 15-Jun-2016 (CT) Add `Recurrence_Units`
# 30-Nov-2016 (CT) Factor `Month_Abbrs`, `Month_Names`,
# `Week_Day_Abbrs_3`, and `Week_Day_Names`
# 30-Nov-2016 (CT) Use title-case for `Month_Abbrs`, `Month_Names`
# 30-Nov-2016 (CT) Use `LC`
# + Remove `lowercase` from `Months`, `Recurrence_Units`
# and all week-day related instances
# 1-Dec-2016 (CT) Factor `Units_Abs`, `Units_Abs_Abbr`, `Units_Delta`,
# `Units_YD`
# 11-Jul-2018 (CT) Adapt doctest to Python 3.7
# 19-Aug-2019 (CT) Use `print_prepr`
# ««revision-date»»···
#--
r"""
G8R provides globalizer objects for the names of months and weekdays and for
calendary units. The globalizers translate strings in the currently selected
language to the primary language (which often is English).
>>> from _TFL.portable_repr import print_prepr
>>> import _CAL.G8R
>>> mr1 = "März-Mai"
>>> mr2 = "Jan, März, Mai, Dez"
>>> wr1 = "Mo-Mi, Do, SO"
>>> wr2 = "MI(-1)"
>>> ur1 = "2 Tage 5 Stunden 3 min 5 sek"
>>> ur2 = "2 tage 5 stunden 3 MIN 5 SEK"
>>> ur3 = "2Tage 5Stunden 3min 5sek"
>>> ur4 = "2Tage5Stunden3min5sek"
>>> _show (CAL.G8R.Months, mr1)
de : März-Mai --> March-May
>>> _show (CAL.G8R.Months.LC, mr1)
de : März-Mai --> march-may
>>> _show (CAL.G8R.Months, mr2, localized_p = True)
de : Jan, März, Mai, Dez --> Jan, March, May, Dec --> Jän, März, Mai, Dez
>>> _show (CAL.G8R.Months.LC, mr2)
de : Jan, März, Mai, Dez --> jan, march, may, dec
>>> _show (CAL.G8R.Week_Days, wr1)
de : Mo-Mi, Do, SO --> Mo-We, Th, SO
>>> _show (CAL.G8R.Week_Days.LC, wr1)
de : Mo-Mi, Do, SO --> mo-we, th, su
>>> _show (CAL.G8R.Week_Days, wr2)
de : MI(-1) --> MI(-1)
>>> _show (CAL.G8R.Week_Days.LC, wr2)
de : MI(-1) --> we(-1)
>>> _show (CAL.G8R.Units, ur1)
de : 2 Tage 5 Stunden 3 min 5 sek --> 2 days 5 hours 3 min 5 sec
>>> _show (CAL.G8R.Units, ur1.lower ())
de : 2 tage 5 stunden 3 min 5 sek --> 2 days 5 hours 3 min 5 sec
>>> _show (CAL.G8R.Units, ur2, localized_p = True)
de : 2 tage 5 stunden 3 MIN 5 SEK --> 2 days 5 hours 3 min 5 sec --> 2 tage 5 stunden 3 min 5 sek
>>> _show (CAL.G8R.Units, ur3)
de : 2Tage 5Stunden 3min 5sek --> 2days 5hours 3min 5sec
>>> _show (CAL.G8R.Units, ur4, localized_p = True)
de : 2Tage5Stunden3min5sek --> 2days5hours3min5sec --> 2tage5stunden3min5sek
>>> with TFL.I18N.test_language ("de") :
... CAL.G8R.All (mr1) == CAL.G8R.Months.LC (mr1)
True
>>> with TFL.I18N.test_language ("de") :
... CAL.G8R.All (mr2) == CAL.G8R.Months.LC (mr2)
True
>>> with TFL.I18N.test_language ("de") :
... CAL.G8R.All (wr1) == CAL.G8R.Week_Days.LC (wr1)
True
>>> with TFL.I18N.test_language ("de") :
... CAL.G8R.All (ur1) == CAL.G8R.Units (ur1)
True
>>> _show (CAL.G8R.All, "; ".join ([mr2, wr1, "2t 30m"]), localized_p = True)
de : Jan, März, Mai, Dez; Mo-Mi, Do, SO; 2t 30m --> jan, march, may, dec; mo-we, th, su; 2d 30m --> jän, märz, mai, dez; mo-mi, do, so; 2t 30m
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Months.keys))
['Apr', 'April', 'Aug', 'August', 'Dec', 'December', 'Feb', 'February', 'Jan', 'January', 'Jul', 'July', 'Jun', 'June', 'Mar', 'March', 'May', 'Nov', 'November', 'Oct', 'October', 'Sep', 'September']
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Months.map.items ()))
[('Dez', 'Dec'), ('Dezember', 'December'), ('Feber', 'February'), ('Juli', 'July'), ('Juni', 'June'), ('J\xe4n', 'Jan'), ('J\xe4nner', 'January'), ('Mai', 'May'), ('M\xe4r', 'Mar'), ('M\xe4rz', 'March'), ('Okt', 'Oct'), ('Oktober', 'October')]
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Months.LC.map.items ()))
[('dez', 'dec'), ('dezember', 'december'), ('feber', 'february'), ('juli', 'july'), ('juni', 'june'), ('j\xe4n', 'jan'), ('j\xe4nner', 'january'), ('mai', 'may'), ('m\xe4r', 'mar'), ('m\xe4rz', 'march'), ('okt', 'oct'), ('oktober', 'october')]
>>> with TFL.I18N.test_language ("de") :
... print (CAL.G8R.Units.replacer.regexp._pattern.pattern)
(?:\b|(?<=\d))(mikrosekunden|mikrosekunde|schalttage|wochentag|sekunden|jahrtag|minuten|quartal|sekunde|stunden|monate|stunde|wochen|jahre|monat|woche|jahr|tage|sek|tag|kw|j|t)(?:\b|(?=\d))
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Week_Days.keys))
['Fr', 'Fri', 'Friday', 'Mo', 'Mon', 'Monday', 'Sa', 'Sat', 'Saturday', 'Su', 'Sun', 'Sunday', 'Th', 'Thu', 'Thursday', 'Tu', 'Tue', 'Tuesday', 'We', 'Wed', 'Wednesday']
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Week_Days.map.items ()))
[('Di', 'Tu'), ('Dienstag', 'Tuesday'), ('Do', 'Th'), ('Donnerstag', 'Thursday'), ('Fr', 'Fri'), ('Freitag', 'Friday'), ('Mi', 'We'), ('Mittwoch', 'Wednesday'), ('Mo', 'Mo'), ('Montag', 'Monday'), ('Sa', 'Sa'), ('Samstag', 'Saturday'), ('So', 'Su'), ('Sonntag', 'Sunday'), ('fr', 'Fr')]
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Week_Days.LC.map.items ()))
[('di', 'tu'), ('dienstag', 'tuesday'), ('do', 'th'), ('donnerstag', 'thursday'), ('fr', 'fr'), ('freitag', 'friday'), ('mi', 'we'), ('mittwoch', 'wednesday'), ('mo', 'mo'), ('montag', 'monday'), ('sa', 'sa'), ('samstag', 'saturday'), ('so', 'su'), ('sonntag', 'sunday')]
>>> with TFL.I18N.test_language ("de") :
... print (CAL.G8R.Week_Days.replacer.regexp._pattern.pattern)
\b(Donnerstag|Dienstag|Mittwoch|Freitag|Samstag|Sonntag|Montag|Di|Do|Fr|Mi|Mo|Sa|So|fr)\b
>>> with TFL.I18N.test_language ("de") :
... print (CAL.G8R.Week_Days.LC.replacer.regexp._pattern.pattern)
\b(donnerstag|dienstag|mittwoch|freitag|samstag|sonntag|montag|di|do|fr|mi|mo|sa|so)\b
>>> _show (CAL.G8R.Recurrence_Units, "weekly")
de : weekly --> weekly
>>> _show (CAL.G8R.Recurrence_Units, "Wöchentlich")
de : Wöchentlich --> Weekly
>>> _show (CAL.G8R.Recurrence_Units.LC, "Wöchentlich")
de : Wöchentlich --> weekly
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Recurrence_Units.map.items ()))
[('J\xe4hrlich', 'Yearly'), ('Monatlich', 'Monthly'), ('T\xe4glich', 'Daily'), ('W\xf6chentlich', 'Weekly')]
>>> with TFL.I18N.test_language ("de") :
... print_prepr (sorted (CAL.G8R.Recurrence_Units.LC.map.items ()))
[('j\xe4hrlich', 'yearly'), ('monatlich', 'monthly'), ('t\xe4glich', 'daily'), ('w\xf6chentlich', 'weekly')]
"""
from _CAL import CAL
from _TFL import TFL
from _TFL.I18N import _
import _TFL.G8R
Month_Abbrs = TFL.G8R \
( [ _("Jan"), _("Feb"), _("Mar"), _("Apr"), _("May"), _("Jun")
, _("Jul"), _("Aug"), _("Sep"), _("Oct"), _("Nov"), _("Dec")
]
)
Month_Names = TFL.G8R \
( [ _("January"), _("February"), _("March")
, _("April"), _("May"), _("June")
, _("July"), _("August"), _("September")
, _("October"), _("November"), _("December")
]
)
Months = TFL.G8R (Month_Abbrs.words, Month_Names.words)
Recurrence_Units = TFL.G8R \
( [ _("Daily"), _("Weekly"), _("Monthly"), _("Yearly")])
Units_Abs = TFL.G8R \
( [_("year"), _("month"), _("day")]
, [_("hour"), _("minute"), _("second"), _ ("microsecond")]
, lowercase = True
, re_head = r"(?:\b|(?<=\d))" # look-behind assertion must be fixed width
, re_tail = r"(?:\b|(?=\d))"
)
Units_Abs_Abbr = TFL.G8R \
( [_("y"), _("d")]
, [_("h"), _("m"), _("min"), _("s"), _("sec")]
, lowercase = True
, re_head = r"(?:\b|(?<=\d))" # look-behind assertion must be fixed width
, re_tail = r"(?:\b|(?=\d))"
)
Units_Delta = TFL.G8R \
( [_("years"), _("months"), _("days")]
, [_("hours"), _("minutes"), _("seconds"), _ ("microseconds")]
, lowercase = True
, re_head = r"(?:\b|(?<=\d))" # look-behind assertion must be fixed width
, re_tail = r"(?:\b|(?=\d))"
)
Units_YD = TFL.G8R \
( [_("yearday"), _("nlyearday"), _("leapdays")]
, lowercase = True
, re_head = r"(?:\b|(?<=\d))" # look-behind assertion must be fixed width
, re_tail = r"(?:\b|(?=\d))"
)
Units = TFL.G8R \
( Units_Abs.words
, Units_Abs_Abbr.words
, Units_Delta.words
, [ _("wk"), _("week"), _("weeks"), _("weekday")]
, [ _("q"), _("quarter")]
, Units_YD.words
, lowercase = True
, re_head = r"(?:\b|(?<=\d))" # look-behind assertion must be fixed width
, re_tail = r"(?:\b|(?=\d))"
)
Week_Day_Abbrs_2 = TFL.G8R \
( [ _("Mo"), _("Tu"), _("We"), _("Th"), _("Fr"), _("Sa"), _("Su")]
, re_tail = r"(?:\b|(?=\(-?\d+\)))"
)
Week_Day_Abbrs_3 = TFL.G8R \
( [ _("Mon"), _("Tue"), _("Wed"), _("Thu"), _("Fri"), _("Sat"), _("Sun")])
Week_Day_Names = TFL.G8R \
( [ _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday")
, _("Friday"), _("Saturday"), _("Sunday")
]
)
Week_Days = TFL.G8R \
( Week_Day_Abbrs_2.words, Week_Day_Abbrs_3.words, Week_Day_Names.words)
All = TFL.G8R_Multi (Units.LC, Months.LC, Week_Days.LC)
def _show (g8r, text, lang = "de", localized_p = False) :
with TFL.I18N.test_language (lang) :
globalized = g8r.globalized (text)
result = (lang, ":", text, "-->", globalized)
if localized_p :
result += ("-->", g8r.localized (globalized))
print (* result)
# end def _show
if __name__ != "__main__" :
CAL._Export_Module ()
### __END__ CAL.G8R
|
"""Update server attributes"""
import os
from configparser import ConfigParser
from cbw_api_toolbox.cbw_api import CBWApi
CONF = ConfigParser()
CONF.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'api.conf'))
CLIENT = CBWApi(CONF.get('cyberwatch', 'url'), CONF.get('cyberwatch', 'api_key'), CONF.get('cyberwatch', 'secret_key'))
SERVER_ID = ''  # add the appropriate server ID
INFO = {
"category": '',
"description": "",
"environment": {}, # Environment object
"deploying_period": "",
"ignoring_policy": "",
"compliance_groups": [], # An array of of the compliance groups IDs you want to set on your
# server split by ',' (ex: [13, 20])
"groups": [] # An array of groups IDs you want to set on your
# server split by ',' (ex: [1, 2])
}
CLIENT.update_server(SERVER_ID, INFO)
|
# -*- coding: utf-8 -*-
__version__ = '0.1.2'
__author__ = 'Sedad Delalic'
__email__ = '[email protected]'
from flask import Blueprint, redirect
from flask_security import Security, SQLAlchemyUserDatastore
from flask_admin import Admin
from flask_security import current_user, logout_user
from flask import current_app
from flask_xadmin.xadm_lib import set_edit_mode, is_super_admin
from flask_xadmin.xadm_lib import xAdminIndexView, xEditModeView, xModelView, current_edit_mode, is_user_authenticated
xadm_app = Blueprint('xadm_app', __name__, template_folder='templates')
# wrap admin
def gen_xadmin(app, title, db, user_model, role_model, views=[]):
db.init_app(app)
# init_login()
user_datastore = SQLAlchemyUserDatastore(db=db, user_model=user_model, role_model=role_model)
security = Security(app, user_datastore)
xadmin = Admin(app, title, index_view=xAdminIndexView(url='/xadmin'), base_template='index.html')
for v in views:
xadmin.add_view(v)
# Add view for enter/leave edit mode
xadmin.add_view(xEditModeView(name='EditMode'))
return xadmin
@xadm_app.before_app_request
def reset_views():
""" Before each request - reset permissions for views, regarding edit_mode """
if not is_user_authenticated():
set_edit_mode(False)
else:
if not is_super_admin():
logout_user()
admins = current_app.extensions.get('admin', [])
for adm in admins:
for v in adm._views:
if hasattr(v, 'set_permissions'):
v.set_permissions(current_edit_mode())
@xadm_app.errorhandler(403)
def forbidden(e):
return redirect('/')
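# Usage sketch (illustrative; `app`, `db`, `User`, `Role`, and `SomeModel`
# are assumed to exist in the host application):
#
#     app.register_blueprint(xadm_app)
#     xadmin = gen_xadmin(app, title='Admin', db=db,
#                         user_model=User, role_model=Role,
#                         views=[xModelView(SomeModel, db.session)])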
|
import pytest
from pytest_lazyfixture import lazy_fixture
import numpy as np
from copy import deepcopy
@pytest.mark.parametrize('rf_classifier',
[lazy_fixture('iris_data')],
indirect=True,
ids='clf=rf_{}'.format,
)
@pytest.mark.parametrize('at_defaults', (0.9, 0.95), indirect=True)
def test_anchor_base_beam(rf_classifier, at_defaults, at_iris_explainer):
# inputs
n_anchors_to_sample = 6
coverage_samples = 500
    dummy_coverage = -0.55  # used to test coverage updates on sampling
X_test, explainer, predict_fn, predict_type = at_iris_explainer
explain_defaults = at_defaults
threshold = explain_defaults['desired_confidence']
explanation = explainer.explain(X_test[0], threshold=threshold, **explain_defaults)
anchor_beam = explainer.mab
assert anchor_beam.state['coverage_data'].shape[0] == explain_defaults['coverage_samples']
# Test draw_samples method
anchor_features = list(explainer.samplers[0].enc2feat_idx.keys())
anchor_max_len = len(anchor_features)
assert anchor_beam.state['coverage_data'].shape[1] == anchor_max_len
to_sample = []
for _ in range(n_anchors_to_sample):
anchor_len = np.random.randint(0, anchor_max_len)
anchor = np.random.choice(anchor_features, anchor_len, replace=False)
to_sample.append(tuple(anchor))
to_sample = list(set(to_sample))
current_state = deepcopy(anchor_beam.state)
for anchor in to_sample:
if anchor not in current_state['t_nsamples']:
anchor_beam.state['t_coverage'][anchor] = dummy_coverage
pos, total = anchor_beam.draw_samples(to_sample, explain_defaults['batch_size'])
for p, t, anchor in zip(pos, total, to_sample):
assert anchor_beam.state['t_nsamples'][anchor] == current_state['t_nsamples'][anchor] + t
assert anchor_beam.state['t_positives'][anchor] == current_state['t_positives'][anchor] + p
if anchor: # empty anchor has dummy coverage
assert anchor_beam.state['t_coverage'][anchor] != dummy_coverage
    # test that resampling works:
    # requesting the full feature set forces an anchor that may not already
    # exist in the state, so draw_samples must resample it
feat_set = tuple(range(anchor_max_len))
pos, total = anchor_beam.draw_samples([feat_set], explain_defaults['batch_size'])
assert 'placeholder' not in explanation['raw']['examples']
assert -1 not in explanation['raw']['coverage']
# test anchor construction
len_1_anchors_set = anchor_beam.propose_anchors([])
assert len(len_1_anchors_set) == anchor_max_len
len_2_anchors_set = anchor_beam.propose_anchors(len_1_anchors_set)
assert len(len_2_anchors_set) == anchor_max_len * (anchor_max_len - 1) / 2
# test coverage data sampling
cov_data = anchor_beam._get_coverage_samples(coverage_samples)
assert cov_data.shape[0] == coverage_samples
|
'''
.. console - Comprehensive utility library for ANSI terminals.
.. ยฉ 2018, Mike Miller - Released under the LGPL, version 3+.
Experimental terminfo support, under construction.
Enables terminfo sequence lookup with console::
import console.terminfo
or use the environment variable::
PY_CONSOLE_USE_TERMINFO=1
'''
try:
from curses import setupterm, tigetstr
setupterm()
# ------------------------------------------------------
from . import constants
constants.BEL = tigetstr('bel')
constants.BS = tigetstr('kbs')
constants.CR = tigetstr('cr')
constants.HT = tigetstr('ht')
constants.LF = tigetstr('ind')
# ------------------------------------------------------
from . import screen
Screen = screen.Screen
Screen.cuu = tigetstr('cuu')
except ModuleNotFoundError:
raise ModuleNotFoundError('''Curses/terminfo not installed, see:
- https://pypi.org/project/windows-curses/
- https://www.lfd.uci.edu/~gohlke/pythonlibs/#curses
''')
|
#####
from model.Data import UsFo
import re
class UserHelper:
def __init__(self, app):
self.app = app
def Open_home_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/") and len(wd.find_elements_by_name("searchform")) > 0):
wd.get("http://localhost/addressbook/")
def Add_user(self, user):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.fill_user_form(user)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.Return_home_page()
self.user_cache = None
def fill_user_form(self, user):
wd = self.app.wd
self.change_field_value("firstname", user.firstname)
self.change_field_value("lastname", user.lastname)
self.change_field_value("address", user.address)
self.change_field_value("home", user.homephone)
self.change_field_value("mobile", user.mobilephone)
self.change_field_value("work", user.workphone)
self.change_field_value("phone2", user.secondaryphone)
self.change_field_value("email", user.email)
self.change_field_value("email2", user.email2)
self.change_field_value("email3", user.email3)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
    def select_first(self):
        self.select_user_by_index(0)
def select_user_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_user_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
    def Edit_user(self, new_user_data):
        self.Edit_user_by_index(0, new_user_data)
def Edit_user_by_index(self, index, new_user_data):
wd = self.app.wd
self.select_user_by_index(index)
wd.find_elements_by_xpath("//a[contains(@href,'edit.php?id=')]")[index].click()
self.fill_user_form(new_user_data)
        # Submit the user update
wd.find_element_by_name("update").click()
self.user_cache = None
def Edit_user_by_id(self, id, new_user_data):
wd = self.app.wd
self.select_user_by_id(id)
wd.find_element_by_xpath("//a[contains(@href, %s) and contains(@href, 'edit.php?id=')]" % id).click()
self.fill_user_form(new_user_data)
        # Submit the user update
wd.find_element_by_name("update").click()
self.user_cache = None
def delete_first_user(self):
self.delete_user_by_index(0)
def delete_user_by_index(self, index):
wd = self.app.wd
self.select_user_by_index(index)
wd.find_element_by_css_selector("input[value=Delete]").click()
wd.switch_to_alert().accept()
self.Open_home_page()
self.user_cache = None
def delete_user_by_id(self, id):
wd = self.app.wd
self.select_user_by_id(id)
wd.find_element_by_css_selector("input[value=Delete]").click()
wd.switch_to_alert().accept()
self.Open_home_page()
self.user_cache = None
def Return_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
def counts(self):
wd = self.app.wd
self.Open_home_page()
return len(wd.find_elements_by_name("selected[]"))
user_cache = None
def get_user_list(self):
if self.user_cache is None:
wd = self.app.wd
self.Open_home_page()
self.user_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.user_cache.append(UsFo(firstname=firstname, lastname=lastname, id=id, address=address,
all_emails_from_home_page=all_emails,
all_phones_from_home_page=all_phones))
return list(self.user_cache)
def open_user_to_edit_by_index(self, index):
wd = self.app.wd
self.app.Open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_user_view_by_index(self, index):
wd = self.app.wd
self.app.Open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_user_info_from_edit_page(self, index):
wd = self.app.wd
self.open_user_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return UsFo(firstname=firstname, lastname=lastname, id=id, address=address,
homephone=homephone, mobilephone=mobilephone, workphone=workphone,
secondaryphone=secondaryphone, email=email, email2=email2, email3=email3)
def get_user_from_view_page(self, index):
wd = self.app.wd
self.open_user_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
secondaryphone = re.search("P: (.*)", text).group(1)
return UsFo(homephone=homephone, mobilephone=mobilephone,
workphone=workphone, secondaryphone=secondaryphone)
def add_to_group(self, group_id, user_id):
wd = self.app.wd
self.select_user_by_id(user_id)
self.select_group_in_dropdown(group_id)
wd.find_element_by_name("add").click()
self.app.Open_home_page()
def filter_for_group(self, id):
wd = self.app.wd
wd.find_element_by_name("group").click()
wd.find_element_by_xpath("//select[@name='group']//option[@value='%s']" % id).click()
def remove_from_group(self, group_id, user_id):
wd = self.app.wd
self.filter_for_group(group_id)
self.select_user_by_id(user_id)
wd.find_element_by_name("remove").click()
def select_group_in_dropdown(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//select[@name='to_group']//option[@value='%s']" % id).click()
|
import sys
import serial.tools.miniterm
def open_terminal(_port="COM11", reset=1):
control_signal = '0' if reset else '1'
control_signal_b = not reset
sys.argv = [sys.argv[0], _port, '115200', '--dtr='+control_signal, '--rts='+control_signal, '--filter=direct']
serial.tools.miniterm.main(default_port=_port, default_baudrate=115200, default_dtr=control_signal_b, default_rts=control_signal_b)
sys.exit(0)
if __name__ == "__main__":
open_terminal() |
#!/usr/bin/env python
from __future__ import print_function
import glob
import sys
import time
from k40nano import NanoPlotter, SvgPlotter, PngPlotter, FileWriteConnection, PrintConnection, MockUsb
from EgvParser import parse_egv
from GcodeParser import parse_gcode
from PngParser import parse_png
NANO_VERSION = "0.0.5"
class NanoCommand:
def __init__(self):
self.title = None
self.wait = 0
self.use_laser = False
self.absolute = False
self.positions = None
self.input_file = None
self.command = None
self.speed = None
self.settings = {}
class Nano:
def __init__(self, arguments):
self.plotter = None
self.log = print
self.speed = None
if arguments is None:
arguments = []
arguments.append("-e") # always execute the stack.
self.elements = list(reversed(arguments))
if len(arguments) == 2:
self.elements = ["-h"]
self.command_lookup = {
"-i": self.command_input,
"-o": self.command_output,
"-p": self.command_passes,
"-m": self.command_move,
"-M": self.command_move_abs,
"-c": self.command_cut,
"-C": self.command_cut_abs,
"-s": self.command_speed,
"-w": self.command_wait,
"-e": self.command_execute,
"-l": self.command_list,
"-r": self.command_home,
"-u": self.command_unlock,
"-U": self.command_lock,
"-q": self.command_quiet,
"-v": self.command_verbose,
"-h": self.command_help,
}
def command_help(self, values):
print("Nano v.", NANO_VERSION)
print("-i [<input>]*, loads egv/png files")
print("-o [<egv/png/svg>]?, sets output method")
print("-p [n], sets the number of passes")
print("-m ([dx] [dy])+, relative move command")
print("-M ([x] [y])+, absolute move command")
print("-c ([dx] [dy])+, relative cut command")
print("-C ([x] [y])+, absolute cut command")
print("-s [+/-]?<speed> [step]*, sets the speed")
print("-w [seconds], wait_time")
print("-e, executes stack")
print("-l, lists stack")
print("-r, resets to home position")
print("-u, unlock rail")
print("-U, lock rail")
print("-v, verbose mode (default)")
print("-q, quiet mode")
print("-h, display this message")
print("")
return values
def get(self):
return self.elements.pop()
def v(self):
if not self.elements:
return None
if self.elements[-1] not in self.command_lookup:
return self.get()
else:
return None
def execute(self):
values = []
while self.elements:
command = self.get()
if command not in self.command_lookup:
continue
values = self.command_lookup[command](values)
if self.plotter is not None:
self.plotter.close()
def command_input(self, values):
v = self.v()
input_files = glob.glob(v)
properties = {}
while True:
key = self.v()
if key is None:
break
value = self.v()
if value is None:
break
properties[key] = value
for input_file in input_files:
m = NanoCommand()
m.title = "File:" + input_file
self.log(m.title)
m.input_file = input_file
m.settings = properties
values.append(m)
return values
@staticmethod
def unit_convert(value):
if value.endswith("in"):
return int(round(1000 * float(value[:-2])))
elif value.endswith("mm"):
return int(round(39.3701 * float(value[:-2])))
elif value.endswith("cm"):
return int(round(393.701 * float(value[:-2])))
elif value.endswith("ft"):
return int(round(12000 * float(value[:-2])))
return int(value)
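    # Worked examples (illustrative): unit_convert maps distances to mils
    # (1/1000 inch), the native step unit used by the commands below:
    #   "1in" -> 1000    "10mm" -> 394 (round(39.3701 * 10))
    #   "2cm" -> 787     "500"  -> 500 (bare numbers pass through)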
def command_move(self, values):
m = NanoCommand()
m.positions = []
m.title = "Move Relative: "
while True:
x = self.v()
if x is None:
break
y = self.v()
if y is None:
break
x = self.unit_convert(x)
y = self.unit_convert(y)
m.positions.append([x, y])
m.title += "(%i,%i) " % (x, y)
self.log(m.title)
m.use_laser = False
m.absolute = False
values.append(m)
return values
def command_move_abs(self, values):
m = NanoCommand()
m.positions = []
m.title = "Move Absolute: "
while True:
x = self.v()
if x is None:
break
y = self.v()
if y is None:
break
x = self.unit_convert(x)
y = self.unit_convert(y)
m.positions.append([x, y])
m.title += "(%i,%i) " % (x, y)
self.log(m.title)
m.use_laser = False
m.absolute = True
values.append(m)
return values
def command_cut(self, values):
m = NanoCommand()
m.positions = []
m.title = "Cut Relative: "
while True:
x = self.v()
if x is None:
break
y = self.v()
if y is None:
break
x = self.unit_convert(x)
y = self.unit_convert(y)
m.positions.append([x, y])
m.title += "(%i,%i) " % (x, y)
self.log(m.title)
m.use_laser = True
m.absolute = False
values.append(m)
return values
def command_cut_abs(self, values):
m = NanoCommand()
m.positions = []
m.title = "Cut Absolute: "
while True:
x = self.v()
if x is None:
break
y = self.v()
if y is None:
break
x = self.unit_convert(x)
y = self.unit_convert(y)
m.positions.append([x, y])
m.title += "(%i,%i) " % (x, y)
self.log(m.title)
m.use_laser = True
m.absolute = True
values.append(m)
return values
def command_speed(self, values):
m = NanoCommand()
speed = self.v()
if speed.startswith("-"):
m.absolute = False
m.title = "Change Speed by: -%f" % float(speed)
elif speed.startswith("+"):
m.absolute = False
m.title = "Change Speed by: +%f" % float(speed)
else:
m.absolute = True
m.title = "Speed: %f" % float(speed)
m.speed = float(speed)
self.log(m.title)
values.append(m)
return values
    def command_wait(self, values):
        m = NanoCommand()
        m.wait = float(self.v())
        m.title = "Pause for: %f seconds" % m.wait
        self.log(m.title)
        values.append(m)
        return values
def command_passes(self, values):
self.log("Stack:", len(values))
new_values = []
count = int(self.v())
for i in range(0, count):
for value in values:
new_values.append(value)
self.log("Stack Count:", len(values), " -> ", len(new_values))
return new_values
def command_list(self, values):
for value in values:
if value.title is not None:
print(value.title)
return values
def get_plotter(self):
if self.plotter is None:
self.plotter = NanoPlotter()
self.plotter.open()
return self.plotter
def command_execute(self, values):
self.log("Executing:", len(values))
for value in values:
if value.positions is not None:
plotter = self.get_plotter()
                if self.speed is not None:
                    try:
                        plotter.enter_compact_mode(self.speed)
except AttributeError:
pass
if value.use_laser:
plotter.down()
for pos in value.positions:
if value.absolute:
self.plotter.move_abs(pos[0], pos[1])
else:
self.plotter.move(pos[0], pos[1])
if value.use_laser:
plotter.up()
if value.wait != 0:
time.sleep(value.wait)
if value.speed is not None:
plotter = self.get_plotter()
if value.absolute:
new_speed = value.speed
else:
                    # relative change; start from 0 if no speed was set yet
                    new_speed = (self.speed or 0) + value.speed
                if new_speed != self.speed:
try:
plotter.exit_compact_mode_reset()
except AttributeError:
pass
self.speed = new_speed
if value.input_file is not None:
fname = str(value.input_file).lower()
if fname.endswith(".egv"):
plotter = self.get_plotter()
print(value.settings)
parse_egv(value.input_file, plotter, value.settings)
elif fname.endswith(".png"):
plotter = self.get_plotter()
print(value.settings)
parse_png(value.input_file, plotter, value.settings)
elif fname.endswith(".gcode") or fname.endswith(".nc"):
plotter = self.get_plotter()
print(value.settings)
parse_gcode(value.input_file, plotter, value.settings)
if value.command is not None:
value.command()
return []
def command_home(self, values):
m = NanoCommand()
m.title = "Home Position"
self.log(m.title)
m.command = self.home_function
values.append(m)
return values
def command_unlock(self, values):
m = NanoCommand()
m.title = "Unlock Rail"
self.log(m.title)
m.command = self.unlock_function
values.append(m)
return values
def command_lock(self, values):
m = NanoCommand()
m.title = "Lock Rail"
self.log(m.title)
m.command = self.lock_function
values.append(m)
return values
def home_function(self):
try:
plotter = self.get_plotter()
plotter.home()
except AttributeError:
pass
def unlock_function(self):
try:
plotter = self.get_plotter()
plotter.unlock_rail()
except AttributeError:
pass
def lock_function(self):
try:
plotter = self.get_plotter()
plotter.lock_rail()
except AttributeError:
pass
def command_output(self, values):
value = self.v()
if value is None:
self.plotter = NanoPlotter()
self.log("NanoPlotter")
else:
value = str(value).lower()
if value.endswith("svg"):
self.plotter = SvgPlotter(value)
self.plotter.open()
self.log("Svg Plotter")
elif value.endswith("png"):
self.plotter = PngPlotter(open(value, "wb+"))
self.plotter.open()
self.log("Png Plotter")
elif value.endswith("egv"):
self.plotter = NanoPlotter(connection=FileWriteConnection(value))
self.plotter.open()
self.log("Egv NanoPlotter")
elif value == "print":
self.plotter = NanoPlotter(connection=PrintConnection())
self.plotter.open()
self.log("Print NanoPlotter")
elif value == "mock":
self.plotter = NanoPlotter(usb=MockUsb())
self.plotter.open()
self.log("MockUsb NanoPlotter")
return values
def command_quiet(self, values):
self.log = self.no_operation
return values
def command_verbose(self, values):
self.log = print
return values
def no_operation(self, *args):
pass
argv = sys.argv
nano = Nano(argv)
nano.execute()
|
import pytest
from bots.economy.grains import grains_command
@pytest.mark.bots
@pytest.mark.vcr
def test_grains_command(mocker, recorder):
mocker.patch("bots.helpers.uuid_get", return_value="1")
value = grains_command()
recorder.capture(value)
|
from utime import sleep_us
from machine import Pin, PWM, ADC, time_pulse_us
WIFIS = dict({
"": ""})
# directions
STOP = 0
LEFT = 1
RIGHT = 2
FORWARD = 3
BACKWARD = 4
# Battery resistor ladder ratio
BATTERY_COEFF = 2.25
# Ultrasonic sensor calibration
ULTRASONIC_OFFSET = 800
# Servo timing
MOTOR_LEFT_TUNING = 33
MOTOR_RIGHT_TUNING = 33
# Motor PWM-s
pwm_left = PWM(Pin(16), freq=50, duty=0)
pwm_right = PWM(Pin(17), freq=50, duty=0)
# Sharp distance sensors
adc_enemy_left = ADC(Pin(36))
adc_enemy_right = ADC(Pin(37))
# Optek sensors
adc_line_left = ADC(Pin(32))
adc_line_middle = ADC(Pin(35))
adc_line_right = ADC(Pin(34))
# Ultrasonic distance sensor
trigger = Pin(25, Pin.OUT)
echo = Pin(26, Pin.IN)
# Battery gauge
adc_battery = ADC(Pin(33))
from time import sleep
# LED sweep
led_red = Pin(27, Pin.OUT, value=0)
sleep(0.2)
led_orange = Pin(14, Pin.OUT, value=0)
sleep(0.2)
led_green = Pin(12, Pin.OUT, value=0)
sleep(0.2)
led_blue = Pin(13, Pin.OUT, value=0)
sleep(0.2)
led_white = Pin(15, Pin.OUT, value=0)
sleep(0.2)
led_red.value(1)
sleep(0.2)
led_orange.value(1)
sleep(0.2)
led_green.value(1)
sleep(0.2)
led_blue.value(1)
sleep(0.2)
led_white.value(1)
sleep(0.2)
# Use 11 dB attenuation for a full ~3.3 V input range
adc_enemy_left.atten(ADC.ATTN_11DB)
adc_enemy_right.atten(ADC.ATTN_11DB)
adc_line_left.atten(ADC.ATTN_11DB)
adc_line_middle.atten(ADC.ATTN_11DB)
adc_line_right.atten(ADC.ATTN_11DB)
adc_battery.atten(ADC.ATTN_11DB)
# Calibrate line sensors
LINE_LEFT_THRESHOLD = adc_line_left.read()
LINE_MIDDLE_THRESHOLD = adc_line_middle.read()
LINE_RIGHT_THRESHOLD = adc_line_right.read()
def battery_voltage():
return round(BATTERY_COEFF * (adc_battery.read() * 3.3 / 4096), 2)
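# Worked example (illustrative): a raw ADC reading of 2048 gives
# 2.25 * (2048 * 3.3 / 4096) = 2.25 * 1.65 = 3.71 V (rounded to 2 decimals).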
enemy_score = 0
def enemy_distance():
global enemy_score
# send ping
trigger.value(0)
sleep_us(5)
trigger.value(1)
sleep_us(10)
trigger.value(0)
# wait for the pulse and calculate the distance
enemy_distance = (time_pulse_us(echo, 1, 30000) / 2) / 29.1
if enemy_distance < 60 and enemy_distance > 0:
if enemy_score < 5:
enemy_score += 1
else:
if enemy_score > 0:
enemy_score -= 1
    # indicate enemy on the LED (assumes an `enemy_led` PWM channel is
    # defined elsewhere; it is not set up in this file)
    enemy_led.duty(255 if enemy_score > 2 else 0)
return True if enemy_score > 2 else False
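# Added note: enemy_distance() debounces the ultrasonic reading with a
# saturating counter (0..5) and only reports an enemy once the score
# exceeds 2, so a single noisy ping in either direction is ignored.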
def line_left():
return abs(adc_line_left.read() - LINE_LEFT_THRESHOLD) > 1000
def line_right():
return abs(adc_line_right.read() - LINE_RIGHT_THRESHOLD) > 1000
def detach_servos():
motor_left(0)
motor_right(0)
prev_left_speed = 0
def motor_left(speed):
global prev_left_speed
if speed == prev_left_speed:
return
prev_left_speed = speed
assert speed >= -100
assert speed <= 100
pwm_left.duty(int(33 + MOTOR_LEFT_TUNING + speed * 33 / 100)) # -100 ... 100 to 33 .. 102
if speed == 0:
pwm_left.duty(0)
prev_right_speed = 0
def motor_right(speed):
global prev_right_speed
if speed == prev_right_speed:
return
    prev_right_speed = speed
assert speed >= -100
assert speed <= 100
pwm_right.duty(int(33 + MOTOR_RIGHT_TUNING + speed * 33 / 100)) # -100 ... 100 to 33 .. 102
if speed == 0:
pwm_right.duty(0)
print("Battery voltage: %.2fV" % battery_voltage())
print("Line sensor thresholds:", LINE_LEFT_THRESHOLD, LINE_MIDDLE_THRESHOLD, LINE_RIGHT_THRESHOLD)
|
from psnawp_api import psnawp_exceptions
# Class Search
# Used to search for users from their PSN ID and get their Online ID
class Search:
base_uri = 'https://www.playstation.com'
def __init__(self, request_builder):
self.request_builder = request_builder
def universal_search(self, search_query):
"""
        Searches the Playstation website. Note: this does not work as of now; the endpoint returns a whole HTML page
:param search_query: search query
:type search_query: str
:returns: search result
:raises PSNAWPIllegalArgumentError: If the search query is empty
"""
        # If the user tries to do an empty search
        if len(search_query) <= 0:
            raise psnawp_exceptions.PSNAWPIllegalArgumentError('search_query must contain a value.')
params = {'q': search_query, 'smcid': 'web:psn:primary nav:search:{}'.format(search_query)}
response = self.request_builder.get(url='{}/en-us/search'.format(Search.base_uri), params=params)
print(response)
|
import re
def parse(file):
NOTES = 1
DATA = 2
state = NOTES
notes = []
data = []
for row in file:
row = row.strip()
if not row:
continue
if state == NOTES:
if re.match('^Date', row):
state = DATA
else:
# Strip non-ascii characters.
# This should really live in some sort of lib module, but it's only used here so far.
row = ''.join([i if ord(i) < 128 else ' ' for i in row])
notes.append(row)
elif state == DATA:
data.append(row.split(','))
return {
'notes': notes,
'data': data
} |
from webpie import WPApp, WPHandler, WPStaticHandler
import sys, sqlite3, json, time
class DataHandler(WPHandler):
Bin = {
"y": 3600*24*7,
"m": 3600*24,
"w": 3600,
"d": 1200
}
Window = {
"y": 3600*24*365,
"m": 3600*24*30,
"w": 3600*24*7,
"d": 3600*24
}
def stats_by_airline(self, request, relpath, window="w", **args):
bin = self.Bin[window]
now = int(time.time())
t0 = (int(now - self.Window[window])//bin)*bin
t1 = (now//bin)*bin
times = list(range(t0, t1+bin, bin))
#print(times)
db = self.App.db()
c = db.cursor()
        # find the top 5 airlines
c.execute("""
select t, airline, count(*)
from (
select distinct callsign, airline, (timestamp/?)*? as t from flights
where timestamp >= ? and airline in
(
select airline
from (
select distinct callsign, airline, date from flights
where timestamp >= ?
) as counts
group by airline
order by count(*) desc limit 5
)
) as counts
group by airline, t
order by t, airline
""", (bin, bin, t0, t0))
rows = c.fetchall()
airlines = sorted(list(set(airline for t, airline, n in rows)))
counts_by_time = {t:{a:0 for a in airlines} for t in times}
for t, a, n in rows:
if t == t1: counts_by_time[t][a] = None
else: counts_by_time[t][a] = n/bin*3600
out = {
"airlines": airlines,
"times": times,
"rows": [
[t] + [counts_by_time[t][a] for a in airlines]
for t in times
]
}
return json.dumps(out), "text/json"
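    # Example response shape (illustrative):
    #   {"airlines": ["AA", ...], "times": [t0, t0+bin, ...],
    #    "rows": [[t0, rate_AA, ...], ...]}
    # where rates are flights per hour and the current (still filling)
    # bin is reported as None.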
class GUIHandler(WPHandler):
def stats_by_airline(self, request, relpath, window="w", **args):
return self.render_to_response("stats_by_airline.html", window=window)
class TopHandler(WPHandler):
def __init__(self, request, app):
WPHandler.__init__(self, request, app)
self.static = WPStaticHandler(request, app, root="static", cache_ttl=60)
self.gui = GUIHandler(request, app)
self.data = DataHandler(request, app)
class App(WPApp):
def __init__(self, handler, dbfilename, **args):
WPApp.__init__(self, handler, **args)
self.DBFile = dbfilename
def init(self):
self.initJinjaEnvironment(tempdirs=["templates"])
def db(self):
return sqlite3.connect(self.DBFile)
application = App(TopHandler, "flights.sqlite")
if __name__ == "__main__":
application.run_server(8899) |
from math import sqrt
co_xa = float(input("Enter the x coordinate of point a: "))
co_xb = float(input("Enter the x coordinate of point b: "))
co_ya = float(input("Enter the y coordinate of point a: "))
co_yb = float(input("Enter the y coordinate of point b: "))
distancia = sqrt((co_xa - co_xb)**2 + (co_ya - co_yb)**2)
print(f"The distance between the two points is: {distancia}")
|
import json
data = 'data/spider/dev.json'
table = 'spider/tables.json'
d = open(data)
t = open(table)
queries = json.load(d)
schema = json.load(t)
# function to add to JSON
#value_list=[]
#db_list=['flight_2', 'wta_1']
#db_list=['concert_singer', 'pets_1', 'car_1', 'flight_2', 'cre_Doc_Template_Mgt', 'course_teach', 'world_1', 'network_1', 'dog_kennels', 'battle_death',
# 'employee_hire_evaluation', 'museum_visit','orchestra','poker_player','real_estate_properties', 'singer', 'student_transcripts_tracking', 'tvshow', 'wta_1']
i = 0
for d in queries:
pk_fk_tables = []
db_id_queries = queries[i]["db_id"]
db_id_list = [d for d in schema if d['db_id'] in db_id_queries]
for pk_fk in db_id_list[0]["foreign_keys"]:
for idx, value in enumerate(db_id_list[0]['column_names']):
if pk_fk[0] == idx or pk_fk[1] == idx:
if value[0] not in pk_fk_tables:
pk_fk_tables.append(value[0])
if len(queries[i]['sql']["from"]["conds"]) != 0:
pk= queries[i]['sql']['from']['conds'][0][2][1][1]
fk= queries[i]['sql']["from"]["conds"][0][3][1]
if [pk, fk] in db_id_list[0]["foreign_keys"] or [fk, pk] in db_id_list[0]["foreign_keys"]:
i += 1
elif (db_id_list[0]['column_names'][pk][0] in pk_fk_tables) and (db_id_list[0]['column_names'][fk][0] in pk_fk_tables):
i += 1
else:
print(db_id_list[0]["db_id"], [fk, pk], d['query'])
i += 1
else:
i += 1
# if len(value_list) == 0 :
# value_list.append(q)
# elif (q["query"] not in value_list[i]['query']) and (q['db_id'] in db_list):
# value_list.append(q)
# i += 1
#with open("data/spider/dev_noDuplicates.json", 'w') as new_file:
# json.dump(value_list, new_file, indent=4) #indent formats it into a nice cute json |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-05 22:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('media', models.CharField(max_length=15)),
('details', models.CharField(blank=True, max_length=30)),
('available', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Loan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('return_date', models.DateField()),
('returned', models.BooleanField(default=False)),
('loan_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='library.Item')),
('loan_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Series',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=110)),
('api_id', models.IntegerField()),
('media_type', models.CharField(choices=[('manga', 'API id is for Manga'), ('anime', 'API id is for Anime')], default='manga', max_length=5)),
('cover_link', models.URLField()),
('synopsis', models.TextField()),
('ani_link', models.URLField()),
('mal_link', models.URLField(blank=True)),
('wiki_link', models.URLField(blank=True)),
],
options={
'verbose_name_plural': 'series',
},
),
migrations.AddField(
model_name='item',
name='parent_series',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='library.Series'),
),
]
|
from pprint import pformat
from lona.view import LonaView
from lona.html import (
TextInput,
CheckBox,
TextArea,
Button,
Select,
HTML,
Div,
H2,
Pre,
)
class DataBindingView(LonaView):
def handle_request(self, request):
check_box = CheckBox(bubble_up=True)
text_input = TextInput(bubble_up=True)
select = Select(
values=[
('', '---'),
('option-a', 'Option A', True),
('option-b', 'Option B', False),
],
bubble_up=True,
)
select_multiple = Select(
values=[
('option-a', 'Option A', True),
('option-b', 'Option B', False),
('option-c', 'Option C'),
],
bubble_up=True,
multiple=True,
)
text_area = TextArea(bubble_up=True)
pre = Pre(
'{}',
style={
'background-color': 'lightgrey',
},
)
html = HTML(
H2('Databinding'),
Div(
Div(
Div(check_box),
Div(text_input),
Div(select),
Div(select_multiple),
Div(text_area),
style={
'float': 'left',
'width': '50%',
},
),
Div(
pre,
Button('Set texts', _id='set-texts'),
Button('Daemonize', _id='daemonize'),
Button('Stop', _id='stop'),
style={
'float': 'left',
'width': '50%',
},
),
),
)
while True:
input_event = self.await_input_event(html=html)
if input_event.node_has_id('set-texts'):
text_input.value = 'test'
text_area.value = 'test'
elif input_event.node_has_id('daemonize'):
self.daemonize()
elif input_event.node_has_id('stop'):
return 'View Stopped'
else:
pre.set_text(
pformat({
'check_box': check_box.value,
'text_input': text_input.value,
'select': select.value,
'select_multiple': select_multiple.value,
'text_area': text_area.value,
})
)
|
import LogicPy.main_functions as main_functions
import LogicPy.conversion as conversion
import LogicPy.gates as gates
import LogicPy.flipflops as flipflops
import LogicPy.combination_logic as combination_logic
import LogicPy.display_terminals as display_terminals
import LogicPy.arithematic_circuit as arithematic_circuit
import LogicPy.counters as counters
import LogicPy.shift_registers as shift_registers |
#!/usr/bin/python
"""Utility to show pywikibot colors."""
#
# (C) Pywikibot team, 2016-2020
#
# Distributed under the terms of the MIT license.
#
import pywikibot
from pywikibot.tools import itergroup
from pywikibot.tools.formatter import color_format
from pywikibot.userinterfaces.terminal_interface_base import colors
def main():
"""Main function."""
fg_colors = [col for col in colors if col != 'default']
bg_colors = fg_colors[:]
n_fg_colors = len(fg_colors)
fg_colors.insert(3 * int(n_fg_colors / 4), 'default')
fg_colors.insert(2 * int(n_fg_colors / 4), 'default')
fg_colors.insert(int(n_fg_colors / 4), 'default')
fg_colors.insert(0, 'default')
# Max len of color names for padding.
max_len_fg_colors = len(max(fg_colors, key=len))
max_len_bc_color = len(max(bg_colors, key=len))
for bg_col in bg_colors:
        # Three lines for each background color.
        for fg_col_group in itergroup(fg_colors, n_fg_colors // 4 + 1):
line = ''
for fg_col in fg_col_group:
line += ' '
line += color_format('{color}{0}{default}',
fg_col.ljust(max_len_fg_colors),
color='{};{}'.format(fg_col, bg_col))
line = '{} {}'.format(bg_col.ljust(max_len_bc_color), line)
pywikibot.output(line)
pywikibot.output('')
if __name__ == '__main__':
main()
|
import requests
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.font_manager as fm
import os
# The NanumSquare font referenced below must be installed locally so matplotlib can render Korean (Hangul) labels.
font_location = "C:/Users/rnfek/miniconda3/Lib/site-packages/matplotlib/mpl-data/fonts/ttf/NanumSquareR.ttf"
font_name = fm.FontProperties(fname=font_location).get_name()
mpl.rc('font',family=font_name)
result_path = os.getcwd().replace("\\", '/')
new_directory = result_path + '/Web/pyflask/static/images/Report/'
class MakeReport:
'''
find_code -> make_data -> remake_df
'''
def __init__(self,args):
self.query = args.query
self.base_URL = "https://finance.naver.com/item/main.nhn?code="
    def find_code(self,query):
        # NOTE: the Korean column labels below were reconstructed from garbled
        # text in the source; '업종명' is a best-guess name for the market column.
        df = pd.read_excel('sub_data/company_code.xlsx',skiprows=3)
        df = df.loc[(df['업종명'] == 'KOSPI') | (df['업종명'] == 'KOSDAQ'), :]
        try:
            # a Series holds index/value pairs
            temp_code = df.loc[df['종목명'] == query, '종목코드'].values[0]
            code = temp_code.replace("'", "")
        except Exception:
            code = None
            print("There is no code for requested query.")
        return code
def remake_df(self, df):
        # Reshape the dataframe into long-format (index, column name, value) lists
index = []
name = []
value = []
for i in range(len(df.index)):
for j,data in enumerate(df.columns):
index.append(df.index[i])
name.append(data)
value.append(df.iloc[i,j])
return index,name,value
def make_data(self):
code = self.find_code(self.query)
if code== None:
return -1
URL = self.base_URL + code
target_company = requests.get(URL)
html = target_company.text
soup = BeautifulSoup(html, 'html.parser')
finance_html = soup.select('div.section.cop_analysis div.sub_section')[0]
th_data = [item.get_text().strip() for item in finance_html.select('thead th')]
annual_date = th_data[3:7]
quarter_date = th_data[7:13]
finance_index = [item.get_text().strip() for item in finance_html.select('th.h_th2')][3:]
finance_data = [item.get_text().strip() for item in finance_html.select('td')]
finance_data = np.array(finance_data)
finance_data.resize(len(finance_index), 10)
finance_date = annual_date + quarter_date
finance = pd.DataFrame(data=finance_data[0:,0:], index=finance_index, columns=finance_date)
annual_finance = finance.iloc[:, :4]
quarter_finance = finance.iloc[:, 4:]
        df = np.transpose(quarter_finance.iloc[0:5, :])
        df[['영업이익률', '순이익률']] = df[['영업이익률', '순이익률']].apply(pd.to_numeric)
        for i in ['영업이익', '매출액', '당기순이익']:
            df[i] = df[i].str.replace(',','').astype('int64')
        # 당기순이익 (net income) values are too large, so divide by 10
        df.iloc[:,0] = df.iloc[:,0]/10
        # df2: 매출액 (revenue), 영업이익 (operating profit), 당기순이익 (net income)
        df2 = df.iloc[:,0:3]
        index,name,value = self.remake_df(df2)
        new_df = pd.DataFrame({'Date': index, 'FinancialStatements': name, 'Value': value})
        # df3: 영업이익률 (operating margin), 순이익률 (net margin)
        df3 = df.iloc[:,3:5]
        index,name,value = self.remake_df(df3)
        new_df2 = pd.DataFrame({'Date':index,'Profit':name,'Value':value})
        return new_df,new_df2
def plot_report(self,new_df,new_df2):
sns.barplot(x='Date', y='Value', hue='FinancialStatements', data=new_df) # default : dodge=True
plt.title('FinancialStatements', fontsize=20)
plt.ylabel('one hundred million', fontsize = 10)
plt.legend(fontsize=5,loc=1)
plt.twinx()
sns.lineplot(x='Date', y='Value', hue='Profit',data=new_df2)
plt.ylabel('Percent', fontsize=10)
plt.legend(fontsize=7,loc=7)
plt.savefig(new_directory + "{}_report.png".format(self.query))
print("financial report is saved in the directory\n")
def make_plot(self):
if self.make_data() == -1:
return -1
else:
new_df,new_df2 = self.make_data()
if os.path.exists(new_directory+"{}_report.png".format(self.query)):
print("already exists")
return 0
else:
self.plot_report(new_df,new_df2)
return 0
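# Usage sketch (added for illustration, not part of the original module): drive
# the whole pipeline with an argparse-style namespace. The query below is a
# hypothetical placeholder; it must match a 종목명 entry in
# sub_data/company_code.xlsx for the lookup to succeed.
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(query='CompanyName')  # hypothetical company name
    if MakeReport(args).make_plot() == -1:
        print('No stock code found; nothing was plotted.')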
|
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app):
app.contact.create(Contact(fname="Albert", sname="Ivanovich", lname="Petrov", address="Nikolaevskiy avenu",
email="[email protected]", tel="+79994447878"))
|
# The example is based on the coco example in
# https://www.dlology.com/blog/how-to-train-detectron2-with-custom-coco-datasets/
import torch # pylint: disable=import-error
import numpy as np
import bentoml
import sys
import traceback
from typing import Dict
from bentoml.frameworks.detectron import DetectronModelArtifact
from bentoml.adapters import ImageInput
from detectron2.data import transforms as T # pylint: disable=import-error
def get_traceback_list():
exc_type, exc_value, exc_traceback = sys.exc_info()
return traceback.format_exception(exc_type, exc_value, exc_traceback)
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([DetectronModelArtifact('model')])
class DetectronClassifier(bentoml.BentoService):
@bentoml.api(input=ImageInput(), batch=False)
def predict(self, original_image: np.ndarray) -> Dict:
_aug = T.ResizeShortestEdge([800, 800], 1333)
height, width = original_image.shape[:2]
image = _aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.artifacts.model([inputs])[0]
pred_instances = predictions["instances"]
boxes = (pred_instances.pred_boxes).to("cpu").tensor.detach().numpy()
scores = (pred_instances.scores).to("cpu").detach().numpy()
pred_classes = (pred_instances.pred_classes).to("cpu").detach().numpy()
pred_masks = (pred_instances.pred_masks).to("cpu").detach().numpy()
result = {
"boxes": boxes,
"scores": scores,
"classes": pred_classes,
"masks": pred_masks,
}
return result
|
import os
r = os.system('./kindlegen ./OEPUB/content.opf') |
import json
from jinja2 import BaseLoader, TemplateNotFound
from sectile import Sectile
class SectileLoader(BaseLoader):
def __init__(self, fragments_dir):
self.sectile = Sectile(fragments=fragments_dir)
self.prepared_path = None
self.prepared_base = None
self.prepared_dimensions = None
self.cache = {}
def dimensions(self):
return self.sectile.get_dimensions_list()
def generate_template(self, path, base_template, **dimensions):
fingerprint = '%s-%s-%s' % (
path,
base_template,
json.dumps(dimensions)
)
content, fragments = self.sectile.generate(
path,
base_template,
**dimensions
)
self.cache = {
'path': path,
'fingerprint': fingerprint,
'base_template': base_template,
'dimensions': dimensions,
'content': content,
'fragments': fragments,
}
return self.cache
def prepare_template(self, path, base_template, **dimensions):
generated = self.generate_template(path, base_template, **dimensions)
return generated['fingerprint']
    def get_source(self, environment, fingerprint):
        if self.cache.get('fingerprint') != fingerprint:
            # report the last generated template; the prepared_* attributes
            # from __init__ were never assigned, so use the cache instead
            raise TemplateNotFound(
                "%s, %s {%s}" % (
                    self.cache.get('path'),
                    self.cache.get('base_template'),
                    self.cache.get('dimensions'),
                )
            )
        else:
            return self.cache['content'], None, None
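# Usage sketch (added for illustration): wire the loader into a Jinja2
# Environment. The fragments directory, page path and base template named here
# are assumptions, not part of this module.
if __name__ == '__main__':
    from jinja2 import Environment
    loader = SectileLoader('fragments')  # hypothetical fragments directory
    env = Environment(loader=loader)
    # prepare_template() generates and caches the page, returning a
    # fingerprint that get_source() later matches against the cache.
    fingerprint = loader.prepare_template('some/page', 'base.html')
    print(env.get_template(fingerprint).render())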
|
#!/usr/bin/env python3
import numpy as np
# import matplotlib.pyplot as plt
import operator as op
from src.sdesolver import Solver
import time
if __name__ == '__main__':
L = 9
N = 2
T = np.array([[(5, 0), (5, 0)],
[(5, 0), (5, np.inf)],
[(5, -np.inf), (5, 0)],
[(-np.inf, 0), (5, 0)],
[(5, 0), (np.inf, 0)],
[(-np.inf, -np.inf), (5, 0)],
[(-np.inf, 0), (5, np.inf)],
[(5, -np.inf), (np.inf, 0)],
[(5, 0), (np.inf, np.inf)]])
T = T.reshape(L, 2, N)
Tops = [[(op.ge, op.ge), (op.le, op.le)],
[(op.ge, op.gt), (op.le, op.lt)],
[(op.ge, op.gt), (op.le, op.lt)],
[(op.gt, op.ge), (op.lt, op.le)],
[(op.gt, op.ge), (op.lt, op.le)],
[(op.gt, op.gt), (op.lt, op.lt)]*(L-5)]
Tops = np.array([item for sublist in Tops for item in sublist])
Tops = Tops.reshape(L, 2, N)
A = np.append(np.array([[0, 0], [0, 0],
[0, 0], [1, 1],
[0, 0], [1, 1],
[0, 1], [0, 0],
[0, 1], [0, 0]]),
np.array([[0, 1], [1, 1]]*(L-5)))
A = A.reshape(L, N, N)
# print(A)
alpha = 4 # This is a parameter
beta = -10
# Now comes the control input of size B
fx1 = (lambda x: -alpha*np.sign(x-5))
fx2 = (lambda x: beta*np.sign(x))
B = np.array([[fx1(5), fx2(0)],
[fx1(5), fx2(1)],
[fx1(5), fx2(-1)],
[fx1(-5), fx2(0)],
[fx1(6), fx2(0)],
[fx1(-4), fx2(-1)],
[fx1(-4), fx2(1)],
[fx1(6), fx2(-1)],
[fx1(6), fx2(1)]])
B = B.reshape(L, N)
# print(B)
S = np.array([0, 0, 0, 0])
S = S.reshape(N, N)
SB = np.array([1, 1])
SB = SB.reshape(N, )
for c in [1e-2, 1e-3, 1e-4, 1e-5]: # The tolerance constant
ivals = [-5, 5]
M = 1 # The number of montecarlo runs
SIM_TIME = 1.0
toplot = np.array([])
timetaken = np.array([])
name = __file__.split('.')[1].split('/')[1]
name = '/tmp/results/'+name+'new'
dfile = name+'_'+str(c)+'.csv'
dfile2 = name+'_'+str(c)+'time.csv'
print(dfile, dfile2)
# The arrays to hold the final result
for p in range(2, 5):
err = 0
aerr = 0
time1 = 0
time2 = 0
avgdt = 0
avgndt = 0
for i in range(M):
solver = Solver(T, Tops, A, B, S, SB, R=2**p, C=c,
montecarlo=True)
                print('Doing 2^p=%d, M=%d, C=%e' % (2**p, i, c))
st = time.time()
vs, ts = solver.simulate(ivals, SIM_TIME)
avgdt += len(ts)
avgndt += len(solver.dts)
time1 += (time.time() - st)
print('simulate done')
st = time.time()
nvs2, nts2 = solver.nsimulate(ivals)
time2 += (time.time() - st)
print('nsimulate done')
err += np.sum(np.square(nvs2[-1] - vs[-1]))
aerr += np.sum(np.abs((nvs2[-1] - vs[-1])/nvs2[-1]))
print('Total square error: %f, %f' % (err, aerr))
print('Total time taken by proposed technique:', time1/M)
print('Total time taken by naive technique:', time2/M)
avgndt = SIM_TIME/(avgndt/M)
print('Average dt:', avgndt)
avgdt = SIM_TIME/(avgdt/M)
print('Average Dt:', avgdt)
# mean_error = np.log(np.sqrt(err/M))
# aerr = aerr/M
# bound = 0.5 * np.log(avgdt)
# bound = 0.5 * np.log((1 + np.log(1/avgdt))) + 0.5 * np.log(avgdt)
# print('Log Error: %f, Log Bound: %f' % (mean_error, bound))
# print('O(bound):', 0.5*np.log(avgdt))
# print('Log error <= Bound', mean_error <= bound)
# Append to the array to plot it later
toplot = np.append(toplot, [[avgdt, np.sqrt(err/M), (aerr/M),
avgndt]])
toplot = toplot.reshape(len(toplot)//4, 4)
timetaken = np.append(timetaken, [[time1/M, time2/M]])
timetaken = timetaken.reshape(len(timetaken)//2, 2)
np.savetxt(dfile, toplot, header='Dt, RMSE, MAPE, dt', fmt='%+10.10f',
delimiter=',')
np.savetxt(dfile2, timetaken, header='PT, NT', fmt='%+10.10f',
delimiter=',')
# xs = [i[0] for i in vs]
# ys = [i[1] for i in vs]
# Plot the output
# plt.plot(ts[2500:3200], xs[2500:3200])
# plt.show()
# plt.plot(ts[2500:3200], ys[2500:3200])
# plt.show()
# print(len(ts))
# plt.plot(xs, ys)
# plt.show()
# TODO: Implement the same with same seed with ordinary EM
# print(solver.path.shape, solver.dts.shape)
# xs = [i[0] for i in nvs2]
# ys = [i[1] for i in nvs2]
# plt.plot(xs, ys)
# plt.show()
|
units = {
1: 'Byte',
    2: 'Kilobyte',
3: 'Megabyte',
4: 'Gigabyte',
5: 'Terabyte'
}
class UnitConverter:
def __init__(self) -> None:
self.get_unit_to_provide()
self.get_unit_to_converter()
self.get_value()
self.converter()
def show_units(self) -> None:
for key in units:
print(f'[{key}] - {units[key]}')
    def get_unit_to_provide(self) -> None:
        print('Selecione a unidade de medida para qual fornecerá um valor:')
        self.show_units()
        option = int(input('>>> '))
        while option not in units:
            print('Opção inválida!')
            option = int(input('>>> '))
        self.unit_provide = units[option]
    def get_unit_to_converter(self) -> None:
        print('Selecione a unidade de medida para qual deseja converter:')
        self.show_units()
        option = int(input('>>> '))
        while option not in units:
            print('Opção inválida!')
            option = int(input('>>> '))
        self.unit_converter = units[option]
def get_value(self) -> None:
print(f'Digite o valor em {self.unit_provide}:')
self.value = float(input('>>> '))
def converter(self) -> None:
for key in units:
if units[key] == self.unit_provide:
unit_provide_num = key
elif units[key] == self.unit_converter:
unit_converter_num = key
if self.unit_provide == self.unit_converter:
print('Selecione unidades diferentes!')
elif unit_provide_num > unit_converter_num:
multiplications = abs(unit_provide_num - unit_converter_num)
print(
f'{self.value} {self.unit_provide} equivalem a {self.value * (1000**multiplications)} {self.unit_converter}')
elif unit_provide_num < unit_converter_num:
divisions = abs(unit_provide_num - unit_converter_num)
print(
f'{self.value} {self.unit_provide} equivalem a {self.value / (1000**divisions)} {self.unit_converter}')
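# Usage sketch (added for illustration): the constructor drives the whole
# interactive flow on stdin. E.g. converting 2 Megabyte (key 3) to Byte
# (key 1) multiplies by 1000**(3-1), printing 2,000,000 Bytes.
if __name__ == '__main__':
    UnitConverter()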
|
import numpy as np
import pytest
import xarray as xr
from pyomeca import Analogs, Angles, Markers, Rototrans
from ._constants import ANALOGS_DATA, EXPECTED_VALUES, MARKERS_DATA
from .utils import is_expected_array
def test_analogs_creation():
dims = ("channel", "time")
array = Analogs()
np.testing.assert_array_equal(x=array, y=xr.DataArray())
assert array.dims == dims
array = Analogs(ANALOGS_DATA.values)
is_expected_array(array, **EXPECTED_VALUES[56])
size = 10, 100
array = Analogs.from_random_data(size=size)
assert array.shape == size
assert array.dims == dims
with pytest.raises(ValueError):
Analogs(MARKERS_DATA)
def test_markers_creation():
dims = ("axis", "channel", "time")
array = Markers()
np.testing.assert_array_equal(x=array, y=xr.DataArray())
assert array.dims == dims
array = Markers(MARKERS_DATA.values)
is_expected_array(array, **EXPECTED_VALUES[57])
size = 3, 10, 100
array = Markers.from_random_data(size=size)
assert array.shape == (4, size[1], size[2])
assert array.dims == dims
with pytest.raises(ValueError):
Markers(ANALOGS_DATA)
def test_angles_creation():
dims = ("axis", "channel", "time")
array = Angles()
np.testing.assert_array_equal(x=array, y=xr.DataArray())
assert array.dims == dims
array = Angles(MARKERS_DATA.values, time=MARKERS_DATA.time)
is_expected_array(array, **EXPECTED_VALUES[57])
size = 10, 10, 100
array = Angles.from_random_data(size=size)
assert array.shape == size
assert array.dims == dims
with pytest.raises(ValueError):
Angles(ANALOGS_DATA)
def test_rototrans_creation():
dims = ("row", "col", "time")
array = Rototrans()
np.testing.assert_array_equal(x=array, y=xr.DataArray(np.eye(4)[..., np.newaxis]))
assert array.dims == dims
data = Markers(MARKERS_DATA.values)
array = Rototrans.from_markers(
origin=data.isel(channel=[0]),
axis_1=data.isel(channel=[0, 1]),
axis_2=data.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
is_expected_array(array, **EXPECTED_VALUES[67])
size = 4, 4, 100
array = Rototrans.from_random_data(size=size)
assert array.shape == size
assert array.dims == dims
with pytest.raises(ValueError):
        Rototrans(ANALOGS_DATA)
|
from django.apps import AppConfig
class UserUiConfig(AppConfig):
name = 'user_ui'
|
import re
import os
import csv
import sys
import json
import time
import glob
import zipfile
import string
import traceback
from pathlib import Path
from typing import Optional
from .utils import flatten_json, get_nearest_value
import requests
import click
from ..settings import GITHUB_TOKEN
from .utils import write_with_size, read_command_type, request_with_limit
from .data import CommitPublic
DATETIME_HEADER = "Date"
validate_models = {"CommitPublic": CommitPublic}
class MaskStructureError(Exception):
"""Raised when mask missmatch with input json."""
pass
def dump_date(date, file_index, path):
file_path = os.path.join(path, f"{file_index}.json")
with open(file_path, "r", newline="", encoding="utf8") as file:
data = json.load(file)
with open(file_path, "w", newline="", encoding="utf8") as file:
data["crawled_at"] = date
json.dump(data, file)
def create_file(init_json, file_index, path):
file_path = os.path.join(path, f"{file_index}.json")
with open(file_path, "w", encoding="utf8") as file:
json.dump(init_json, file)
def validate(data, allowed_data, schema):
"""Take a data structure and apply pydentic model."""
pydentic_class = validate_models[schema]
allowed_data.update(pydentic_class(**data).dict())
def commits_parser(github_commits, repo_id, html_url, schema):
"""
    Push commits through the validator and add additional fields.
    Return the latest commit sha and a list of validated dicts.
"""
commits = github_commits.json()
out = list()
for commit in commits:
allowed_data = {"repo_id": repo_id, "repo_html_url": html_url}
if commit:
validate(flatten_json(commit), allowed_data, schema)
out.append(allowed_data)
return commits[0]["sha"], out
def read_repos(repos_dir, file_name, start_id, end_id):
"""
Read repos from file. Filter repos by given repo id range if specified.
"""
repos_file_path = os.path.join(repos_dir, file_name)
# load available repo
if os.path.isfile(repos_file_path):
with open(repos_file_path, "r") as repos_file:
if start_id and end_id:
                return [repo for repo in json.load(repos_file)["data"] if start_id <= repo["id"] <= end_id]
else:
return json.load(repos_file)["data"]
def create_zip_file(files_dir):
"""
Create zip inside snippets folder
"""
with zipfile.ZipFile(
os.path.join(files_dir, "..", "commits.zip"), "w", zipfile.ZIP_DEFLATED
) as zipf:
for root, dirs, files in os.walk(files_dir):
for file in files:
zipf.write(
os.path.join(root, file),
os.path.relpath(
os.path.join(root, file), os.path.join(files_dir, "..")
),
)
def get_repos_files(repos_dir, start_id, end_id):
"""
    Return the list of repo files for the given id range, or all files from the folder if no range is set.
    In order to make sure that all repositories are covered, two additional files
    are included from before the start and after the end of the ordered directory list.
"""
    dir_files = os.listdir(repos_dir)
    if not dir_files:
        raise ValueError("Empty repos dir.")
    result_command_type = read_command_type(os.path.join(repos_dir, dir_files[0]))
    if start_id and end_id and result_command_type == "crawl":
        nearest_start_id = get_nearest_value(dir_files, start_id)
        if dir_files.index(f"{nearest_start_id}.json") - 2 <= 0:
            start_index = 0
        else:
            start_index = dir_files.index(f"{nearest_start_id}.json") - 2
        nearest_end_id = get_nearest_value(dir_files, end_id)
        if dir_files.index(f"{nearest_end_id}.json") + 2 >= len(dir_files):
            end_index = len(dir_files)
        else:
            end_index = dir_files.index(f"{nearest_end_id}.json") + 2
        return dir_files[start_index:end_index]
    else:
        return dir_files
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.option(
"--start-id",
"-s",
type=int,
default=None,
help="Start repo id for crawl command output.",
)
@click.option(
"--end-id",
"-e",
type=int,
default=None,
help="End repo id. You need to specify both parameters start and end id. ",
)
@click.option("--crawldir", "-d", default=".", help='Path to save folder. default="." ')
@click.option("--repos-dir", "-r", help="Directory with repos files.")
@click.option(
"--schema",
"-S",
type=click.Choice(list(validate_models.keys())),
default="CommitPublic",
help="Directory with repos files.",
)
@click.option(
"--token",
"-t",
help="Access token for increase rate limit. Read from env $github_token if specify.",
default=None,
)
@click.option(
"--min-rate-limit",
"-l",
type=int,
default=10,
help="Minimum remaining rate limit on API under which the crawl is interrupted",
)
def commits(
start_id: Optional[int],
end_id: Optional[int],
crawldir: str,
repos_dir: str,
schema: str,
token: Optional[str],
min_rate_limit: int,
):
"""
    Read the repos json files and fetch all commits for those repos one by one.
"""
if not os.path.exists(crawldir):
os.makedirs(crawldir)
if not token:
token = GITHUB_TOKEN
headers = {
"accept": "application/vnd.github.v3+json",
}
    if token is not None:
headers["Authorization"] = f"token {token}"
else:
click.echo(f"start with low rate limit")
file_index = 1
files_for_proccessing = get_repos_files(repos_dir, start_id, end_id)
start_block = {"command": "commits", "data": [], "crawled_at": None}
    # two outputs: an indexing csv and the commits files
commits_path = os.path.join(crawldir, "commits")
csv_out = os.path.join(commits_path, "id_indexes.csv")
if not os.path.exists(commits_path):
os.makedirs(commits_path)
with click.progressbar(files_for_proccessing) as bar, open(
csv_out, mode="wt", encoding="utf8", newline=""
) as output:
fnames = ["file", "commt_hash", "license", "repo_url", "language"]
writer = csv.DictWriter(output, fieldnames=fnames)
writer.writeheader()
for file_name in bar:
repos = read_repos(repos_dir, file_name, start_id, end_id)
if not repos:
continue
create_file(start_block, file_index, commits_path)
for i, repo in enumerate(repos):
# Get commits
                commits_response = request_with_limit(
                    repo["commits_url"].replace("{/sha}", ""), headers, min_rate_limit
                )
                sha, commits = commits_parser(
                    commits_response, repo["id"], repo["html_url"], schema
                )
                if repo["license"]:
                    license = repo["license"]["spdx_id"]
                else:
                    license = repo["license"]
                # date when this commits file was crawled
                date = commits_response.headers.get(DATETIME_HEADER)
# Indexing
writer.writerow(
{
"file": os.path.join("commits", f"{file_index}.json"),
"repo_url": repo["html_url"],
"commt_hash": sha,
"license": license,
"language": repo["language"],
}
)
current_size = write_with_size(commits, file_index, commits_path)
# Size regulation
if current_size > 5000000:
dump_date(date, file_index, commits_path)
file_index += 1
create_file(start_block, file_index, commits_path)
elif i == len(repos) - 1:
dump_date(date, file_index, commits_path)
file_index += 1
create_zip_file(commits_path)
if __name__ == "__main__":
commits()
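# Example invocation (illustrative; the directories and id range are
# hypothetical):
#   python commits.py --repos-dir ./repos --crawldir ./out -s 100 -e 200
# This reads the repos json files, writes out/commits/<n>.json plus
# out/commits/id_indexes.csv, and finally zips everything into out/commits.zip.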
|
"""
Soundscape information retrieval
Author: Tzu-Hao Harry Lin ([email protected])
"""
import plotly.graph_objects as go
import pandas as pd
def interactive_matrix(input_data, f, vmin=None, vmax=None, x_title=None, y_title=None, x_date=True, figure_title=None, figure_plot=True, html_save=False, html_name='Interactive_matrix.html'):
if x_date:
fig = go.Figure(data=go.Heatmap(z=input_data[:,1:].T,
x=pd.to_datetime(input_data[:,0]-693962, unit='D',origin=pd.Timestamp('1900-01-01')),
y=f,
colorscale='Jet', zmin = vmin, zmax = vmax))
else:
fig = go.Figure(data=go.Heatmap(z=input_data[:,1:].T,
x=input_data[:,0],
y=f,
colorscale='Jet', zmin = vmin, zmax = vmax))
fig.update_layout(title=figure_title, yaxis_title=y_title, xaxis_title=x_title)
if figure_plot:
fig.show()
else:
html_save=True
if html_save:
fig.write_html(file=html_name)
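# Usage sketch (added for illustration): render a fabricated long-term
# spectrogram. The first column is assumed to hold MATLAB datenums (hence the
# -693962 offset above); all values here are made up for demonstration.
if __name__ == '__main__':
    import numpy as np
    days = np.arange(737000, 737030)              # hypothetical MATLAB datenums
    matrix = np.column_stack([days, np.random.rand(30, 64)])
    freqs = np.linspace(0, 8000, 64)              # Hz
    interactive_matrix(matrix, freqs, x_title='Date', y_title='Frequency (Hz)',
                       figure_title='Demo', figure_plot=False,
                       html_name='demo_matrix.html')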
|
# From http://wiki.xentax.com/index.php?title=Blender_Import_Guide
bl_info = {
"name": "Name of the add-on",
"author": "Name of the author of the add-on, some of these are optional",
"location": "File > Import > Name of script",
"description": "Description of the add-on",
"category": "Import-Export"}
import bpy
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
class IMPORT_OT_yourformatname(bpy.types.Operator, ImportHelper):
    bl_idname = "import_scene.yourformat"
    bl_description = 'Your description'
    bl_label = "Label for the button in the GUI"
    filename_ext = ".yourextension"
    filter_glob = StringProperty(default="*.defaultextension", options={'HIDDEN'})
    filepath = StringProperty(name="File Path", description="Filepath used for importing the yourformatname file", maxlen=1024, default="")
def execute(self, context):
yourfunctiontorun()
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def menu_func(self, context):
self.layout.operator(IMPORT_OT_yourformatname.bl_idname, text="your description")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func)
if __name__ == "__main__":
register()
|
import requests
print(requests.__version__)
print(requests.get("http://google.com/"))
x = requests.get("https://raw.github.com/Faiyaz42/CMPUT404/main/script.py")
print(x.text)
|
import pytest
from fastai import *
from fastai.text import *
def text_df(labels):
data = []
texts = ["fast ai is a cool project", "hello world"] * 20
for ind, text in enumerate(texts):
sample = {}
sample["label"] = labels[ind%len(labels)]
sample["text"] = text
data.append(sample)
return pd.DataFrame(data)
def text_csv_file(filepath, labels):
file = open(filepath, 'w', encoding='utf-8')
df = text_df(labels)
df.to_csv(filepath, index=False)
file.close()
return file
def text_files(path, labels):
os.makedirs(path/'temp', exist_ok=True)
texts = ["fast ai is a cool project", "hello world"] * 20
for lbl in labels:
os.makedirs(path/'temp'/lbl, exist_ok=True)
for i,t in enumerate(texts):
with open(path/'temp'/lbl/f'{lbl}_{i}.txt', 'w') as f: f.write(t)
def test_from_folder():
path = untar_data(URLs.IMDB_SAMPLE)
text_files(path, ['pos', 'neg'])
data = (TextList.from_folder(path/'temp')
.random_split_by_pct(0.1)
.label_from_folder()
.databunch())
assert (len(data.train_ds) + len(data.valid_ds)) == 80
assert set(data.classes) == {'neg', 'pos'}
shutil.rmtree(path/'temp')
def test_from_csv_and_from_df():
path = untar_data(URLs.IMDB_SAMPLE)
df = text_df(['neg','pos'])
data1 = TextClasDataBunch.from_df(path, train_df=df, valid_df=df, test_df=df, label_cols=0, text_cols=["text"])
assert len(data1.classes) == 2
df = text_df(['neg','pos','neg pos'])
data2 = TextClasDataBunch.from_df(path, train_df=df, valid_df=df, test_df=df,
label_cols=0, text_cols=["text"], label_delim=' ')
assert len(data2.classes) == 2
x,y = data2.train_ds[0]
assert len(y.data) == 2
text_csv_file(path/'tmp.csv', ['neg','pos'])
data3 = TextLMDataBunch.from_csv(path, 'tmp.csv', test='tmp.csv', label_cols=0, text_cols=["text"])
assert len(data3.classes) == 1
data4 = TextLMDataBunch.from_csv(path, 'tmp.csv', test='tmp.csv', label_cols=0, text_cols=["text"], max_vocab=5)
assert len(data4.train_ds.vocab.itos) == 7 # 5 + 2 (special UNK and PAD token)
os.remove(path/'tmp.csv')
def test_should_load_backwards_lm():
path = untar_data(URLs.IMDB_SAMPLE)
df = text_df(['neg','pos'])
data = TextLMDataBunch.from_df(path, train_df=df, valid_df=df, label_cols=0, text_cols=["text"],
bs=1, backwards=True)
lml = data.train_dl.dl
lml.data = lml.batchify(np.concatenate([lml.dataset.x.items[i] for i in range(len(lml.dataset))]))
batch = lml.get_batch(0, 70)
as_text = [data.train_ds.vocab.itos[x] for x in batch[0]]
np.testing.assert_array_equal(as_text[:2], ["world", "hello"])
def df_test_collate(data):
x,y = next(iter(data.train_dl))
assert x.size(0) == 8
assert x[0,-1] == 1
def test_load_and_save_test():
path = untar_data(URLs.IMDB_SAMPLE)
df = text_df(['neg','pos'])
data = TextClasDataBunch.from_df(path, train_df=df, valid_df=df, test_df=df, label_cols=0, text_cols="text")
data.save()
data1 = TextClasDataBunch.load(path)
assert np.all(data.classes == data1.classes)
assert np.all(data.train_ds.y.items == data1.train_ds.y.items)
str1 = np.array([str(o) for o in data.train_ds.y])
str2 = np.array([str(o) for o in data1.train_ds.y])
assert np.all(str1 == str2)
shutil.rmtree(path/'tmp')
|
#! /usr/bin/env python
#-*- coding:utf-8 -*
from openpyxl.reader.excel import load_workbook
import codecs
import sys
def get_type(i_kind):
if (i_kind == u'ๅบ็จๅ็งฐ'):
return 'fine'
elif (i_kind == u'ๆณ้ๆฑ'):
return 'general'
else:
        print 'Error: ' + i_kind
sys.exit()
def print_score(d_score):
print 'Total Score\t' + 'Total Num\t' + 'Bad num of first\t' + 'Bad num of first perc\t'
for kind in d_score:
item = d_score[kind]
print kind, item[0] / item[1], item[1], item[2], float(item[2]) / item[1]
print ""
t_score = d_score['fine'][0] + d_score['general'][0]
t_num = d_score['fine'][1] + d_score['general'][1]
print 'Total', t_score / t_num
if __name__ == '__main__':
wb = load_workbook(filename = r'evaluation_form.xlsx')
debug_f = codecs.open('debug', 'w', 'gbk')
sheetnames = wb.get_sheet_names()
for name in sheetnames:
print 'sheet ' + name
ws = wb.get_sheet_by_name(sheetnames[2])
print 'title:'+ws.title
print 'sheet rows:' , ws.get_highest_row()
print 'sheet cols:' , ws.get_highest_column()
d_score = {
        'fine': [0]*3, # total score, total num, bad num of first
'general':[0]*3
}
for rx in range(1, 149):
w = [0]*6
for cx in range(0,6):
w[cx] = ws.cell(row = rx, column = cx).value
kind = get_type(w[1])
d_score[kind][0] += w[2]
d_score[kind][1] += 1
if (w[3] != 4):
d_score[kind][2] += 1
print w[0]
print_score(d_score)
|
from time import sleep
import platform
import os
def screen_print(loader=True) -> None:
"""Print title screen with a simple toggle loader"""
intro_text = '- ACME EMPLOYEE PAYMENT CALCULATOR -'
center_values = '*' * ((80 - len(intro_text)) // 2)
print('*' * 80)
print(center_values + intro_text + center_values)
print('*' * 80)
if not loader:
pass
else:
print('Initializing')
for i in range(1, 4):
sleep(1)
print('.' * i)
print('Succeed')
sleep(1)
def clear_os_type() -> None:
"""Checks the current operating system for executing the right clear command to console"""
os_type = platform.system()
if os_type == 'Windows':
os.system('cls')
else:
os.system('clear')
|
from typing import List, Text, Any
import torch.nn as nn
from models.cell_operations import ResNetBasicblock
from models.cell_infers.cells import InferCell
class DynamicShapeTinyNet(nn.Module):
def __init__(self, channels: List[int], genotype: Any, num_classes: int):
super(DynamicShapeTinyNet, self).__init__()
self._channels = channels
if len(channels) % 3 != 2:
raise ValueError('invalid number of layers : {:}'.format(len(channels)))
self._num_stage = N = len(channels) // 3
self.stem = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(channels[0]))
# layer_channels = [C ] * N + [C*2 ] + [C*2 ] * N + [C*4 ] + [C*4 ] * N
layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
c_prev = channels[0]
self.cells = nn.ModuleList()
for index, (c_curr, reduction) in enumerate(zip(channels, layer_reductions)):
if reduction : cell = ResNetBasicblock(c_prev, c_curr, 2, True)
else : cell = InferCell(genotype, c_prev, c_curr, 1)
self.cells.append( cell )
c_prev = cell.out_dim
self._num_layer = len(self.cells)
self.lastact = nn.Sequential(nn.BatchNorm2d(c_prev), nn.ReLU(inplace=True))
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(c_prev, num_classes)
def get_message(self) -> Text:
string = self.extra_repr()
for i, cell in enumerate(self.cells):
string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
return string
def extra_repr(self):
return ('{name}(C={_channels}, N={_num_stage}, L={_num_layer})'.format(name=self.__class__.__name__, **self.__dict__))
def forward(self, inputs):
feature = self.stem(inputs)
for i, cell in enumerate(self.cells):
feature = cell(feature)
out = self.lastact(feature)
out = self.global_pooling( out )
out = out.view(out.size(0), -1)
logits = self.classifier(out)
return out, logits
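# Usage sketch (added for illustration): the channel list must satisfy
# len(channels) % 3 == 2 (N normal cells per stage plus two reduction slots),
# and `genotype` must be an architecture object understood by InferCell (e.g.
# one decoded elsewhere in this repo), so only the shape contract is sketched:
#   net = DynamicShapeTinyNet([16]*5 + [32]*6 + [64]*6, genotype, num_classes=10)
#   features, logits = net(torch.randn(2, 3, 32, 32))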
|
#!/usr/bin/python3
"""
This script adds to an Expression Atlas' EB-eye XML dump tissue and disease information retrieved
from condensed sdrf files.
Author: Robert Petryszak ([email protected])
"""
from xml.dom import minidom
from re import sub, match, IGNORECASE
import os
import sys
import time
import argparse
class EBEyeDumpEnrichmentError(Exception):
pass
def createAppendElement(doc, parentElem, elemName, elemText, elemAttributeTuples=[]):
"""
1. Create xml elemName
2. Populate it with attributes in elemAttributeTuples,
3. Append to elemName a child text node elemText
4. Append elemName as a child of parentElem
"""
el = doc.createElement(elemName)
if (elemText):
el.appendChild(doc.createTextNode(elemText))
for attrVal in elemAttributeTuples:
el.setAttribute(attrVal[0], attrVal[1])
parentElem.appendChild(el)
def retrieveSampleAnnotationsFromCondensedSdrfFile(condensedSdrfFilePath):
"""
Retrieve from condensedSdrfFilePath disease and tissue annotations (including cross-references
to ontology terms) and store them in diseases, tissues and crossRefs sets respectively.
>>> retrieveSampleAnnotationsFromCondensedSdrfFile('!')
Traceback (most recent call last):
...
EBEyeDumpEnrichmentError: ERROR: ! doesn't exist
"""
if os.path.exists(condensedSdrfFilePath):
with open(condensedSdrfFilePath, 'r') as condensedSdrfFile:
condensedSdrfStr = condensedSdrfFile.read()
return retrieveSampleAnnotationsFromCondensedSdrf(condensedSdrfStr)
else:
raise EBEyeDumpEnrichmentError("ERROR: " + condensedSdrfFilePath + " doesn't exist")
"""
Retrieve from condensedSdrfStr disease and tissue annotations (including cross-references
to ontology terms) and store them in diseases, tissues and crossRefs sets respectively.
"""
def retrieveSampleAnnotationsFromCondensedSdrf(condensedSdrfStr):
"""
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("")
>>> len(diseases)
0
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-2770\\t\\trun_5637.2\\tfactor\\tcell line\t5637\thttp://www.ebi.ac.uk/efo/EFO_0002096")
>>> len(diseases) + len(tissues) + len(crossRefs)
0
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-2770\\t\\trun_5637.2\\tfactor\\tdisease\\tbladder carcinoma\\thttp://www.ebi.ac.uk/efo/EFO_0000292")
>>> "bladder carcinoma" in diseases
True
>>> "EFO_0000292" in crossRefs
True
>>> tissues
set()
>>> (diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrf("E-MTAB-513\\t\\tERR030881\\tfactor\\torganism part\\tadrenal\\thttp://purl.obolibrary.org/obo/UBERON_0002369")
>>> "adrenal" in tissues
True
>>> "UBERON_0002369" in crossRefs
True
>>> diseases
set()
"""
diseases, tissues, crossRefs = (set([]), set([]), set([]))
for row in condensedSdrfStr.split("\n"):
arr = row.strip().split("\t")
if len(arr) > 4 and arr[3] == "factor":
if arr[4].lower() == "organism part":
tissues.add(arr[5].strip())
if len(arr) > 6:
crossRefs.add(arr[6].split("/")[-1].strip())
elif arr[4].lower() == "disease":
diseases.add(arr[5].strip())
if len(arr) > 6:
crossRefs.add(arr[6].split("/")[-1].strip())
return (diseases, tissues, crossRefs)
def addSampleAnnotationsToEntry(doc, entry, diseases, tissues, crossRefs):
"""
Add annotations in diseases, tissues, crossRefs to entry
>>> doc = minidom.Document()
>>> entry = doc.createElement('entry')
>>> addSampleAnnotationsToEntry(doc, entry, {'bladder carcinoma'}, {}, {'EFO_0000292'})
>>> entry.getElementsByTagName("additional_fields").length
1
>>> entry.getElementsByTagName("cross_references").length
1
>>> entry.getElementsByTagName("additional_fields").item(0).firstChild.toprettyxml(indent="").strip() == '<field name="disease">bladder carcinoma</field>'
True
>>> entry.getElementsByTagName("cross_references").item(0).firstChild.toprettyxml(indent="").strip() == '<ref dbName="efo" dbkey="EFO_0000292"/>'
True
>>> entry = doc.createElement('entry')
>>> addSampleAnnotationsToEntry(doc, entry, {}, {'adrenal'}, {})
>>> entry.getElementsByTagName("additional_fields").item(0).firstChild.toprettyxml(indent="").strip() == '<field name="tissue">adrenal</field>'
True
"""
if diseases or tissues:
if len(entry.getElementsByTagName("additional_fields")) == 0:
createAppendElement(doc, entry, "additional_fields","")
additionalFields = entry.getElementsByTagName("additional_fields")[0]
if len(entry.getElementsByTagName("cross_references")) == 0:
createAppendElement(doc, entry, "cross_references","")
crossReferences = entry.getElementsByTagName("cross_references")[0]
for tissue in tissues:
createAppendElement(doc, additionalFields, 'field', tissue, [('name','tissue')])
for disease in diseases:
createAppendElement(doc, additionalFields, 'field', disease, [('name','disease')])
for ontologyTerm in crossRefs:
ontologyName = ontologyTerm.split("_")[0].lower()
createAppendElement(doc, crossReferences, 'ref', None, [('dbkey', ontologyTerm),('dbName', ontologyName)])
condensedSDRFRootDir = None
if __name__ == "__main__":
# Capture call arguments
parser = argparse.ArgumentParser(description='Enrich file xmlpath with tissue/disease information from condensed sdrf files in condensed_sdrf_dir. Store the result in xmlpath.enriched.')
parser.add_argument('xml', metavar='xmlpath', nargs=1, help='Path to Atlas XML dump file for EBI Search')
parser.add_argument('--sdrfdir', metavar='condensed_sdrf_dir', nargs=1, help='Directory containing condensed sdrf files')
args = parser.parse_args()
xmlFilePath = args.xml[0]
if args.sdrfdir:
condensedSDRFRootDir = args.sdrfdir[0]
if xmlFilePath == "test":
import doctest
doctest.testmod(verbose=False)
else:
t0 = time.time()
doc = minidom.parse(xmlFilePath)
print(f"Parsed {xmlFilePath} successfully in {round(time.time() - t0)} seconds")
entries = doc.getElementsByTagName('entry')
t0 = time.time()
recCnt = 0
for entry in entries:
accession = entry.attributes['id'].value
condensedSdrfFilePath = os.path.join(condensedSDRFRootDir, accession, "%s.condensed-sdrf.tsv" % accession)
(diseases, tissues, crossRefs) = retrieveSampleAnnotationsFromCondensedSdrfFile(condensedSdrfFilePath)
addSampleAnnotationsToEntry(doc, entry, diseases, tissues, crossRefs)
recCnt += 1
if recCnt % 200 == 0:
print(f"Processed {recCnt} entries - {round(time.time() - t0)} seconds so far")
print("Processed %d %s entries successfully in %d seconds" % (len(entries), xmlFilePath, round(time.time() - t0)))
xmlStr = doc.toprettyxml(indent=" ")
xmlStr = os.linesep.join([s for s in xmlStr.splitlines() if s.strip()])
with open("%s.enriched" % xmlFilePath, "w") as f:
f.write(xmlStr)
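# Example invocations (illustrative file names):
#   python enrich_ebeye_dump.py atlas_dump.xml --sdrfdir /data/condensed_sdrf
#   python enrich_ebeye_dump.py test   # runs the embedded doctests instead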
|
import os
import subprocess
from kf_model_omop.utils.db import _select_config
from kf_model_omop.config import MODELS_FILE_PATH, ROOT_DIR
def auto_gen_models(config_name=None, refresh_schema=False,
model_filepath=MODELS_FILE_PATH):
"""
Autogenerate the OMOP SQLAlchemy models
Use sqlacodegen to generate models from the db. Then apply customizations
to the models (i.e. add Kids First IDs, etc)
"""
print('\nAuto-generating models ...\n')
# Auto generate models from temp db
generate_models_from_db(model_filepath, config_name)
# Inject customizations into the models Python module
customize_models(model_filepath)
print(f'\nComplete - generated models: {model_filepath}')
def generate_models_from_db(model_filepath, config_name=None):
"""
Use sqlacodegen to generate SQLAlchemy models Python module from Postgres
"""
config = _select_config(config_name=config_name)
cmd = (f'sqlacodegen {config.SQLALCHEMY_DATABASE_URI} '
f'--outfile {model_filepath}')
output = subprocess.run(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output_str = output.stdout.decode('utf-8')
print(output_str)
if output.returncode != 0:
raise Exception(
f'Error in auto_gen_models!\n\n{output_str}')
def customize_models(model_filepath):
"""
Modify models.py generated by generate_models_from_db with customizations
Make all models inherit Base and ModelMixins
Fix bug ConceptClas -> ConceptClass
Add module docstring
"""
# Find/replace things
with open(model_filepath, 'r') as models_file:
models_txt = models_file.read()
models_txt = models_txt.replace('ConceptClas', 'ConceptClass')
# models_txt = models_txt.replace('(Base)', '(Base, ModelMixins)')
# Insert docstring and imports
template_path = os.path.join(ROOT_DIR, 'factory', 'model_template.txt')
with open(template_path, 'r') as template_file:
customized_models_code = template_file.read()
customized_models_code = customized_models_code.format(
models=models_txt)
# Update models.py
with open(model_filepath, 'w') as models_file:
models_file.write(customized_models_code)
|
from django.db import models
import calendar
from datetime import date
class Utility(models.Model):
name = models.CharField(max_length=1024)
def __str__(self):
return str(self.name)
class Bill(models.Model):
utility = models.ForeignKey(Utility, on_delete=models.CASCADE)
charge = models.DecimalField(max_digits=10, decimal_places=2)
start_date = models.DateField()
end_date = models.DateField()
def get_charge_for_month(self, year, month):
days_in_month = calendar.monthrange(year, month)[1]
start = latest_date(date(year, month, 1), self.start_date)
end = earliest_date(date(year, month, days_in_month), self.end_date)
days_for_rent = (end-start).days + 1
return self.charge / days_in_month * days_for_rent
def get_days_for_billing_cycle(self):
return (self.end_date - self.start_date).days + 1
def __str__(self):
return str(self.utility.name) + ": " + str(self.charge) + \
", " + calendar.month_abbr[self.start_date.month] + " " + str(self.start_date.day) + \
" - " + calendar.month_abbr[self.end_date.month] + " " + str(self.end_date.day)
class Occupant(models.Model):
name = models.CharField(max_length=1024)
rent = models.DecimalField(max_digits=10, decimal_places=2)
start_date = models.DateField()
end_date = models.DateField()
def get_rent_for_month(self, year, month):
days_in_month = calendar.monthrange(year, month)[1]
start = latest_date(date(year, month, 1), self.start_date)
end = earliest_date(date(year, month, days_in_month), self.end_date)
days_for_rent = (end-start).days + 1
return self.rent / days_in_month * days_for_rent
def __str__(self):
return str(self.name)
def earliest_date(date1, date2):
if date1 < date2:
return date1
else:
return date2
def latest_date(date1, date2):
if date1 > date2:
return date1
else:
return date2
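# Worked example (added for illustration; values are hypothetical): a bill of
# 90.00 covering June 16-30 is prorated by get_charge_for_month as
# charge / days_in_month * days_for_rent = 90 / 30 * 15 = 45.00 for June:
#   bill = Bill(utility=water, charge=Decimal('90.00'),
#               start_date=date(2021, 6, 16), end_date=date(2021, 6, 30))
#   bill.get_charge_for_month(2021, 6)  # -> 45.00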
|
"""
ยฉ https://sudipghimire.com.np
Create a class Employee that contains different attributes.
- id
- first_name
- last_name
- project
- department
- salary
Make the project, department, and salary attributes private and use getter and setter methods to get and
set their respective values.
id should be private and can only be initialized when an Employee instance is created.
first_name and last_name should be initialized in the constructor and can be changed at any time.
"""
# answer
class Employee:
def __init__(self, id, first_name, last_name) -> None:
self.__id = id
self.first_name = first_name
self.last_name = last_name
self.__project = ''
self.__department = ''
self.__salary = ''
def get_project(self):
return self.__project
def get_department(self):
return self.__department
def get_salary(self):
return self.__salary
def set_project(self, project: str):
self.__project = project
def set_department(self, department: str):
self.__department = department
def set_salary(self, salary: int):
self.__salary = salary
# Test
john = Employee(1, "John", "Doe")
john.set_project("ABC management system")
john.set_salary(5000)
john.set_department("Software")
print(john.get_department())
print(john.get_salary())
print(john.get_project())
john.last_name = "Lennon"
print(john.first_name, john.last_name)
|
import pandas as pd
import numpy as np
import math
def score(data):
    ## Scoring rules
    # Knee (rodilla)
max_border_1 = 210 # Max threshold for valid point
min_border_1 = 0 # Min threshold for valid point
min_thr_1 = 120
mid_thr_1 = 110
    # Hip (cadera)
max_border_2 = 210 # Max threshold for valid point
min_border_2 = 0 # Min threshold for valid point
min_thr_2 = 120
mid_thr_2 = 100
time_thr_min = 15 # Minimun valid time
time_thr_max = 20
# Read data
df = data
#Orde data
df=df[['Second','Angle']]
df['time']=df['Second']
df[['ang_1','ang_2']]=pd.DataFrame(df["Angle"].to_list(), columns=['ang_1','ang_2'])
df['ang_1']=df['ang_1'].astype(float)
df['ang_2']=df['ang_2'].astype(float)
df.drop(columns=['Angle', 'Second'], inplace=True)
#Fix bad frames
df.loc[df['ang_1'] > max_border_1, 'ang_1'] = df.ang_1.mean()
df.loc[df['ang_1'] < min_border_1, 'ang_1'] = df.ang_1.mean()
df.loc[df['ang_2'] > max_border_2, 'ang_2'] = df.ang_2.mean()
df.loc[df['ang_2'] < min_border_2, 'ang_2'] = df.ang_2.mean()
#Calculate each frame rate
df['frame_t'] = abs(df.time - df.time.shift(1))
fps=round(1/df.frame_t.median(),2)
#Save original stats
df_o_stats=df.describe().astype(float).round(3).to_dict()
#Get valid angles
df_1 = df[(df.ang_1<=min_thr_1)] #Select by 1st angle
df_2 = df_1[(df_1.ang_2<=min_thr_2)] #Select by 2nd angle
valid_time = df_2.frame_t.sum()
valid_time = round(valid_time,2)
#Save final stats
df_stats=df_2.describe().astype(float).round(3).to_dict()
#Evaluate
score = True
time_rec=[]
angle_rec=[]
mean_ang_1_o=df.ang_1.mean()
mean_ang_2_o=df.ang_2.mean()
mean_ang_1=df_2.ang_1.mean()
mean_ang_2=df_2.ang_2.mean()
""" mean_ang_1=60
mean_ang_2=110
valid_time=18 """
print(mean_ang_1)
print(mean_ang_2)
print(valid_time)
    if(valid_time<time_thr_min):
        score = False
        time_score=1
        rec='Inténtalo de nuevo, recuerda mantener la posición el máximo tiempo posible.'
        time_rec.append(rec)
        # Hip (cadera)
        if(mean_ang_2_o<=mid_thr_2):
            ang_2_score=3
        elif(mean_ang_2_o<=min_thr_2):
            ang_2_score=2
            rec='¡Muy bien! Intenta levantar un poco más tu pierna para lograr la posición perfecta'
            angle_rec.append(rec)
        else:
            ang_2_score=1
            rec='Inténtalo de nuevo, recuerda que debes levantar más la pierna ¡Tú puedes!'
            angle_rec.append(rec)
        if(mean_ang_1_o>min_thr_1):
            ang_1_score=1
            rec='Inténtalo de nuevo, debes doblar más la rodilla ¡Tú puedes!'
            angle_rec.append(rec)
        elif(mean_ang_1_o>mid_thr_1):
            ang_1_score=2
            rec='¡Muy bien! Intenta doblar un poco más tu rodilla'
            angle_rec.append(rec)
        else:
            ang_1_score=3
    elif (valid_time<time_thr_max):
        time_score=2
        rec='¡Muy bien! Lograste 15 segundos de mantención de elongación ¡Sigue así!'
        time_rec.append(rec)
        if(mean_ang_1>mid_thr_1):
            ang_1_score=2
            rec='¡Muy bien! Intenta doblar un poco más tu rodilla'
            angle_rec.append(rec)
        else:
            ang_1_score=3
        if(mean_ang_2>mid_thr_2):
            ang_2_score=2
            rec='¡Muy bien! Intenta levantar un poco más tu pierna para lograr la posición perfecta'
            angle_rec.append(rec)
        else:
            ang_2_score=3
    else:
        time_score=3
        if(mean_ang_1>mid_thr_1):
            ang_1_score=2
            rec='¡Muy bien! Intenta doblar un poco más tu rodilla'
            angle_rec.append(rec)
        else:
            ang_1_score=3
        if(mean_ang_2>mid_thr_2):
            ang_2_score=2
            rec='¡Muy bien! Intenta levantar un poco más tu pierna para lograr la posición perfecta'
            angle_rec.append(rec)
        else:
            ang_2_score=3
if(ang_1_score<ang_2_score):
ang_score=ang_1_score
else:
ang_score=ang_2_score
stats={
'original_stats':df_o_stats,
'result_stats':df_stats,
'n_rep': None,
'valid_time': float(valid_time),
'fps': float(fps),
}
result={
'time': time_score,
'angle': ang_score,
'ang_rec': angle_rec,
'time_rec': time_rec,
'score': score,
'stats':stats
}
return result
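# Usage sketch (added for illustration): `data` must provide a 'Second' column
# of timestamps and an 'Angle' column of (knee, hip) angle pairs per frame.
# The synthetic clip below is fabricated purely for demonstration.
if __name__ == '__main__':
    demo = pd.DataFrame({
        'Second': [i / 30 for i in range(600)],   # ~20 s at 30 fps
        'Angle': [('100.0', '95.0')] * 600,       # a held stretch position
    })
    print(score(demo))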
|
# -*- coding: utf-8 -*-
"""
Author: Juliette Monsel
License: GNU GPLv3
Source: https://github.com/j4321/tkColorPicker
Edited by RedFantom for Python 2/3 cross-compatibility and docstring formatting
tkcolorpicker - Alternative to colorchooser for Tkinter.
Copyright 2017 Juliette Monsel <[email protected]>
tkcolorpicker is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
tkcolorpicker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Alpha channel gradient bar
"""
from PIL import Image, ImageTk
from .functions import tk, round2, rgb_to_hsv
from .functions import create_checkered_image
class AlphaBar(tk.Canvas):
"""Bar to select alpha value."""
def __init__(self, parent, alpha=255, color=(255, 0, 0), height=11,
width=256, variable=None, **kwargs):
"""
Create a bar to select the alpha value.
:param parent: parent widget
:type parent: widget
:param alpha: initially selected alpha value (between 0 and 255)
:type alpha: int
:param color: gradient color in RGB format
:type color: tuple[int]
:param variable: variable linked to the alpha value
:type variable: IntVar
:param height: height of the widget in pixels
:type height: int
:param width: width of the widget in pixels
:type width: int
:param kwargs: options to be passed on to the :class:`tk.Canvas` initializer
"""
tk.Canvas.__init__(self, parent, width=width, height=height, **kwargs)
self.gradient = tk.PhotoImage(master=self, width=width, height=height)
self._variable = variable
if variable is not None:
try:
alpha = int(variable.get())
except Exception:
pass
else:
self._variable = tk.IntVar(self)
if alpha > 255:
alpha = 255
elif alpha < 0:
alpha = 0
self._variable.set(alpha)
try:
self._variable.trace_add("write", self._update_alpha)
except Exception:
self._variable.trace("w", self._update_alpha)
self.bind('<Configure>', lambda e: self._draw_gradient(alpha, color))
self.bind('<ButtonPress-1>', self._on_click)
self.bind('<B1-Motion>', self._on_move)
def _draw_gradient(self, alpha, color):
"""Draw the gradient and put the cursor on alpha."""
self.delete("gradient")
self.delete("cursor")
del self.gradient
width = self.winfo_width()
height = self.winfo_height()
bg = create_checkered_image(width, height)
r, g, b = color
w = width - 1.
gradient = Image.new("RGBA", (width, height))
for i in range(width):
for j in range(height):
gradient.putpixel((i, j), (r, g, b, round2(i / w * 255)))
self.gradient = ImageTk.PhotoImage(Image.alpha_composite(bg, gradient),
master=self)
self.create_image(0, 0, anchor="nw", tags="gardient",
image=self.gradient)
self.lower("gradient")
x = alpha / 255. * width
h, s, v = rgb_to_hsv(r, g, b)
if v < 50:
fill = "gray80"
else:
fill = 'black'
self.create_line(x, 0, x, height, width=2, tags='cursor', fill=fill)
def _on_click(self, event):
"""Move selection cursor on click."""
x = event.x
self.coords('cursor', x, 0, x, self.winfo_height())
self._variable.set(round2((255. * x) / self.winfo_width()))
def _on_move(self, event):
"""Make selection cursor follow the cursor."""
w = self.winfo_width()
x = min(max(event.x, 0), w)
self.coords('cursor', x, 0, x, self.winfo_height())
self._variable.set(round2((255. * x) / w))
def _update_alpha(self, *args):
alpha = int(self._variable.get())
if alpha > 255:
alpha = 255
elif alpha < 0:
alpha = 0
self.set(alpha)
self.event_generate("<<AlphaChanged>>")
def get(self):
"""Return alpha value of color under cursor."""
coords = self.coords('cursor')
return round2((255. * coords[0]) / self.winfo_width())
def set(self, alpha):
"""
Set cursor position on the color corresponding to the alpha value.
:param alpha: new alpha value (between 0 and 255)
:type alpha: int
"""
if alpha > 255:
alpha = 255
elif alpha < 0:
alpha = 0
x = alpha / 255. * self.winfo_width()
self.coords('cursor', x, 0, x, self.winfo_height())
self._variable.set(alpha)
def set_color(self, color):
"""
Change gradient color and change cursor position if an alpha value is supplied.
:param color: new gradient color in RGB(A) format
:type color: tuple[int]
"""
if len(color) == 3:
alpha = self.get()
else:
alpha = color[3]
self._draw_gradient(alpha, color[:3])
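# Usage sketch (added for illustration; run as a module, e.g.
# `python -m <package>.alphabar`, so the relative imports above resolve):
if __name__ == '__main__':
    root = tk.Tk()
    var = tk.IntVar(root, value=128)
    bar = AlphaBar(root, alpha=128, color=(30, 120, 200), variable=var)
    bar.pack(fill='x', padx=10, pady=10)
    # the widget fires <<AlphaChanged>> whenever the linked variable changes
    bar.bind('<<AlphaChanged>>', lambda e: print('alpha =', var.get()))
    root.mainloop()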
|
import os
import random
from torchelper.utils.dist_util import get_rank
from torchelper.models.model_builder import ModelBuilder
from tqdm import tqdm
import torch
import time
import torch.distributed as dist
import torch.multiprocessing as mp
from torchelper.utils.config import merge_cfg
from torchelper.utils.cls_utils import get_cls, new_cls, auto_new_cls
from torchelper.data.base_dataset import get_data_loader
import torch.backends.cudnn as cudnn
import subprocess
from torch.utils.tensorboard import SummaryWriter
from torchelper.utils import logger
def check_close_port(port):
result = subprocess.run(['lsof', '-i:'+str(port)], stdout=subprocess.PIPE)
out = result.stdout.decode('utf-8')
lines = out.split('\n')
if len(lines)<=1:
return
print(out)
lines = lines[1:]
for line in lines:
arr = [s for s in line.split(' ') if len(s)>0]
if len(arr)<2:
continue
pid = int(arr[1])
os.system('kill '+str(pid))
print("kill", pid)
def check_close_gpu_ids(gpu_ids):
if not isinstance(gpu_ids, list):
gpu_ids = [gpu_ids]
print('kill:', gpu_ids)
result = subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE)
out = result.stdout.decode('utf-8')
lines = out.split('\n')
if len(lines)<=1:
return
start_flag = False
for line in lines:
if not start_flag:
if line.startswith('|=') and not '+' in line:
start_flag = True
else:
line = ' '.join(line.split())
arr = line.split(' ')
if len(arr)<8:
continue
gpu_id = int(arr[1])
pid = int(arr[4])
for gid in gpu_ids:
if gpu_id == gid and arr[6].endswith('/python'):
os.system('kill '+str(pid))
print('kill', arr[6])
def get_port(def_port):
result = subprocess.run(['lsof', '-i:'+str(def_port)], stdout=subprocess.PIPE)
out = result.stdout.decode('utf-8')
lines = out.split('\n')
if len(lines)<=1:
return def_port
return get_port(def_port+1)
def validate(net:ModelBuilder, epoch:int, val_dataset):
    '''Run evaluation over the validation set.
    :param net: a ModelBuilder subclass instance
    :param epoch: int, the current epoch
    :param val_dataset: the validation dataset
    :return: description of the return value
    :raises ValueError: description of when this is raised
    '''
if get_rank()==0:
net.set_eval()
# net.before_validate(epoch)
# time.sleep(10)
val_data:dict = {}
for data in tqdm(val_dataset):
res = net.validate(epoch, data)
if res is None:
continue
for key, val in res.items():
val_data[key] = val + val_data.get(key, 0)
line = 'epoch: '+str(epoch)+', '
if val_data is not None:
val_arr = []
for key, val in val_data.items():
val_arr.append(key + ":" + str(val_data[key] * 1.0 / len(val_dataset)))
line = line+', '.join(val_arr)
# val_data = net.after_validate(epoch)
if val_data is not None:
val_arr = []
for key, val in val_data.items():
val_arr.append(key + ":" + str(val_data[key]))
line = line +','+ ', '.join(val_arr)
print(line)
torch.distributed.barrier()
# Update the tensorboard display
def update_tensorboard(builder:ModelBuilder, tb_writer, epoch, step, step_per_epoch_per_gpu, gpu_count):
if tb_writer is None:
return
audios = builder.get_audio_dict()
images = builder.get_img_dict()
scalars = builder.get_scalar_dict()
metrics = builder.get_metric_dict()
audios = audios if audios is not None else {}
images = images if images is not None else {}
scalars = scalars if scalars is not None else {}
metrics = metrics if metrics is not None else {}
step = (epoch*step_per_epoch_per_gpu+step)*gpu_count
# print(epoch, step_per_epoch_per_gpu, epoch*step_per_epoch_per_gpu)
for k, v in audios.items():
if v is None:
logger.warn(k+" is None ... ")
continue
tb_writer.add_audio(k, v, step, sample_rate=16000)
for k, v in images.items():
if v is None:
logger.warn(k+" is None ... ")
continue
tb_writer.add_image(k, v, step)
for k, v in scalars.items():
if v is None:
logger.warn(k+" is None ... ")
continue
tb_writer.add_scalar(k, v, step)
for k, v in metrics.items():
if v is None:
logger.warn(k+" is None ... ")
continue
tb_writer.add_scalar(k, v, step)
def update_pbar(builder:ModelBuilder, pbar):
if pbar is None:
return
scalars = builder.get_scalar_dict()
if scalars is None:
return
msg = []
for k, v in scalars.items():
msg.append('%s:%.5f'%(k, v))
pbar.set_description(', '.join(msg))
def train(gpu_id, cfg, is_dist):
train_data_cfg = merge_cfg(cfg['dataset']['train'], cfg)
val_data_cfg = merge_cfg(cfg['dataset']['val'], cfg)
train_dataset = auto_new_cls(train_data_cfg)
val_dataset = auto_new_cls(val_data_cfg)
gpu_count = len(cfg['ori_gpu_ids'])
train_dataloader = get_data_loader(cfg['batch_per_gpu'], train_dataset, num_workers=0, dist=True)
val_dataloader = get_data_loader(cfg['batch_per_gpu'], val_dataset, dist=False)
builder:ModelBuilder = get_cls(cfg['model_builder'])(cfg, True, cfg['ckpt_dir'])
builder.set_dataset(train_dataset)
dataset_size = len(train_dataloader)
tb_writer = None
step_per_epoch_per_gpu = dataset_size
if gpu_id==0:
tb_writer = SummaryWriter(cfg['ckpt_dir'])
builder.perform_cb('on_begin_train')
for epoch in range(cfg['start_epoch'], cfg['total_epoch']):
# validate(builder, epoch, val_dataloader)
builder.set_train()
if gpu_id==0:
pbar = tqdm(train_dataloader)
enum_data = enumerate(pbar)
else:
pbar = None
enum_data = enumerate(train_dataloader)
builder.perform_cb('on_begin_epoch', epoch=epoch)
for i, data in enum_data:
builder.on_begin_forward(data, epoch, i)
builder.perform_cb('on_begin_step', epoch=epoch, step=i)
builder.forward_wrapper(epoch, i, data)
builder.on_end_forward( epoch, i)
builder.on_begin_backward( epoch, i)
            # compute the loss
builder.backward_wrapper()
            if is_dist:  # synchronize across GPUs
torch.distributed.barrier()
builder.on_end_backward( epoch, i)
builder.perform_cb('on_end_step', epoch=epoch, step=i)
if gpu_id==0:
update_pbar(builder, pbar)
update_tensorboard(builder, tb_writer, epoch, i, step_per_epoch_per_gpu, gpu_count)
builder.perform_cb('on_end_epoch', epoch=epoch)
# builder.save_model(epoch, save_max_count, save_max_time)
validate(builder, epoch, val_dataloader)
builder.perform_cb('on_end_train')
def train_worker(gpu_id, nprocs, cfg, is_dist, port):
    '''Runs as an independent worker process.
    '''
os.environ['NCCL_BLOCKING_WAIT']="1"
os.environ['NCCL_ASYNC_ERROR_HANDLING']='1'
random.seed(0)
torch.manual_seed(0)
cudnn.deterministic = True
    # Speeds things up; mainly effective when the input shape is fixed.
    # With dynamic input shapes it can actually be slower.
dist.init_process_group(backend='nccl',
init_method='tcp://127.0.0.1:'+str(port),
world_size=len(cfg['gpu_ids']),
rank=gpu_id)
torch.cuda.set_device(gpu_id)
    # Split the batch across the individual GPUs
# cfg['batch_size'] = int(cfg['batch_size'] / nprocs)
train(gpu_id, cfg, is_dist)
def train_main(cfg):
check_close_gpu_ids(cfg['ori_gpu_ids'])
# check_close_port(cfg['port'])
gpu_nums = len(cfg['gpu_ids'])
# if gpu_nums>1:
port = get_port(cfg['port'])
print('init port:', port)
mp.spawn(train_worker, nprocs=gpu_nums, args=(gpu_nums, cfg, True, port))
# else:
# train(cfg['gpu_ids'][0], cfg, False)
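# The cfg dict consumed above is only partially visible in this file; from the
# accesses in train()/train_worker()/train_main() it must at least provide
# (a sketch, all values hypothetical):
#   cfg = {
#       'dataset': {'train': {...}, 'val': {...}},
#       'gpu_ids': [0, 1], 'ori_gpu_ids': [0, 1], 'port': 23456,
#       'batch_per_gpu': 8, 'model_builder': 'path.to.BuilderClass',
#       'ckpt_dir': './ckpt', 'start_epoch': 0, 'total_epoch': 100,
#   }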
|
from typing import Callable
from starlette.requests import Request
from starlette.responses import JSONResponse
from app.src.exception import APIException
def http_exception_factory(status_code: int) -> Callable:
def http_exception(_: Request, exception: APIException) -> JSONResponse:
return JSONResponse(status_code=status_code, content={"message": exception.message})
return http_exception
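# A minimal usage sketch (hypothetical, not part of this module): installing a
# factory-made handler on a Starlette/FastAPI app so every APIException is
# rendered as a JSON body. The 400 status code here is illustrative only.
if __name__ == "__main__":  # illustrative only
    from starlette.applications import Starlette
    demo_app = Starlette()
    demo_app.add_exception_handler(APIException, http_exception_factory(400))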
|
from abc import ABCMeta
from functools import lru_cache
from PIL import Image
import numpy as np
from torch.utils.data.dataset import Dataset as TorchDataset
from torchvision.transforms import ToTensor, Compose
from utils import coerce_to_path_and_check_exist, use_seed
from utils.path import DATASETS_PATH
class _AbstractMultiObjectDataset(TorchDataset):
__metaclass__ = ABCMeta
root = DATASETS_PATH
name = NotImplementedError
n_channels = 3
n_classes = NotImplementedError
img_size = NotImplementedError
N = NotImplementedError
instance_eval = True
def __init__(self, split, **kwargs):
self.data_path = coerce_to_path_and_check_exist(self.root / self.name)
self.split = split
self.eval_mode = kwargs.get('eval_mode', False) or split == 'test'
self.eval_semantic = kwargs.get('eval_semantic', False)
if self.eval_mode:
self.size = 320
elif split == 'val':
with use_seed(42):
self.val_indices = np.random.choice(range(self.N), 100, replace=False)
self.size = 100
else:
self.size = self.N
def __len__(self):
return self.size
def __getitem__(self, idx):
path = self.data_path
if self.split == 'val':
idx = self.val_indices[idx]
inp = self.transform(Image.open(path / 'images' / f'{idx}.png').convert('RGB'))
if self.eval_semantic:
label = (self.transform_gt(Image.open(path / 'sem_masks' / f'{idx}.png').convert('L')) * 255).long()
else:
label = (self.transform_gt(Image.open(path / 'masks' / f'{idx}.png').convert('L')) * 255).long()
return inp, label
@property
@lru_cache()
def transform(self):
return Compose([ToTensor()])
@property
@lru_cache()
def transform_gt(self):
return Compose([ToTensor()])
class DSpritesGrayDataset(_AbstractMultiObjectDataset):
name = 'dsprites_gray'
img_size = (64, 64)
N = 60000
n_classes = 4
class CLEVR6Dataset(_AbstractMultiObjectDataset):
name = 'clevr6'
img_size = (128, 128)
N = 34963
n_classes = 7
class TetrominoesDataset(_AbstractMultiObjectDataset):
name = 'tetrominoes'
img_size = (35, 35)
N = 60000
n_classes = 20
|
# Standard Library
import configparser
# Third Party
import pytest
# CrazyHusk
from crazyhusk import config
def test_config_init(empty_parser: config.UnrealConfigParser) -> None:
assert isinstance(empty_parser, configparser.RawConfigParser)
@pytest.mark.parametrize(
"input_string,output_string",
[
("AxisConfig", "AxisConfig"),
("+AxisConfig", "AxisConfig"),
("-AxisConfig", "AxisConfig"),
(".AxisConfig", "AxisConfig"),
("!AxisConfig", "AxisConfig"),
],
)
def test_config_optionxform(
empty_parser: config.UnrealConfigParser, input_string: str, output_string: str
) -> None:
assert empty_parser.optionxform(input_string) == output_string
|
import numpy as np
import os
import cPickle as pickle
import seaborn as sns
sns.set(style="white", palette="Set2")
import matplotlib.pyplot as plt
log_dir = "./experiments/log/backprop/"
log_dir = "./experiments/paper/temp/"
# filename = log_dir + "01-11-2016--11-51-41-0.p"
for filename in sorted(os.listdir(log_dir)):
filename = log_dir + filename
if filename[-2:]!=".p" or filename.split("-")[1] != "11":
continue
print(filename)
exp = pickle.load(open(filename, "rb"))
title = "%s-C_%g-l_rate_%s" % (exp.dataset.solver, exp.dataset.C, exp.comment.split("rate: ").pop())
# export_name = "./experiments/paper/cifar10/bp/%s" % title
# pickle.dump(exp, open(export_name + ".p", "wb"))
fig = plt.figure()
fig.suptitle(title)
ax1 = fig.add_subplot(211)
train_err, = ax1.plot(exp.train_time, exp.train_err, label="Training error")
val_err, = ax1.plot(exp.val_time, exp.val_err, label="Validation error")
ax1.legend(handles=[train_err, val_err])
ax1.set_ylabel("Error")
ax2 = fig.add_subplot(212)
train_acc, = ax2.plot(exp.train_time, exp.train_acc, label="Training accuracy")
val_acc, = ax2.plot(exp.val_time, exp.val_acc, label="Validation accuracy")
ax2.set_ylim([0., 100.])
ax2.legend(handles=[train_acc, val_acc])
ax2.set_xlabel("Time (s)")
ax2.set_ylabel("Accuracy (%)")
# plt.savefig(export_name + ".png")
# plt.close()
plt.show()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
from unittest import TestCase
from conda.base.context import context
from conda.common.compat import text_type
from conda.models.channel import Channel
from conda.models.index_record import IndexJsonRecord
from conda.models.prefix_record import PrefixRecord
log = getLogger(__name__)
blas_value = 'accelerate' if context.subdir == 'osx-64' else 'openblas'
class PrefixRecordTests(TestCase):
def test_prefix_record_no_channel(self):
pr = PrefixRecord(
name='austin',
version='1.2.3',
build_string='py34_2',
build_number=2,
url="https://repo.continuum.io/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2",
subdir="win-32",
md5='0123456789',
files=(),
)
assert pr.url == "https://repo.continuum.io/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2"
assert pr.channel.canonical_name == 'defaults'
assert pr.subdir == "win-32"
assert pr.fn == "austin-1.2.3-py34_2.tar.bz2"
channel_str = text_type(Channel("https://repo.continuum.io/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2"))
assert channel_str == "https://repo.continuum.io/pkgs/free"
assert dict(pr.dump()) == dict(
name='austin',
version='1.2.3',
build='py34_2',
build_number=2,
url="https://repo.continuum.io/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2",
md5='0123456789',
files=(),
channel=channel_str,
subdir="win-32",
fn="austin-1.2.3-py34_2.tar.bz2",
constrains=(),
depends=(),
)
def test_provides_features(self):
base = IndexJsonRecord(
name='austin',
version='1.2.3',
build_string='py34_2',
build_number=2,
subdir="win-32",
url="https://repo.continuum.io/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2",
)
assert base.track_features == ()
assert base.provides_features == {}
assert dict(base.dump()) == dict(
name='austin',
version='1.2.3',
build='py34_2',
build_number=2,
subdir="win-32",
depends=(),
constrains=(),
)
rec = IndexJsonRecord.from_objects(base, track_features='debug nomkl')
assert rec.track_features == ('debug', 'nomkl')
assert rec.provides_features == {'debug': 'true',
'blas': blas_value}
assert dict(rec.dump()) == dict(
name='austin',
version='1.2.3',
build='py34_2',
build_number=2,
subdir="win-32",
depends=(),
constrains=(),
track_features='debug nomkl',
provides_features={'debug': 'true', 'blas': blas_value},
)
rec = IndexJsonRecord.from_objects(base, track_features='debug nomkl',
provides_features={'blas': 'openblas'})
assert rec.track_features == ('debug', 'nomkl')
assert rec.provides_features == {'blas': 'openblas'}
assert dict(rec.dump()) == dict(
name='austin',
version='1.2.3',
build='py34_2',
build_number=2,
subdir="win-32",
depends=(),
constrains=(),
track_features='debug nomkl',
provides_features={'blas': 'openblas'},
)
rec = IndexJsonRecord.from_objects(base, provides_features={'blas': 'openblas'})
assert rec.track_features == ()
assert rec.provides_features == {'blas': 'openblas'}
assert dict(rec.dump()) == dict(
name='austin',
version='1.2.3',
build='py34_2',
build_number=2,
subdir="win-32",
depends=(),
constrains=(),
provides_features={'blas': 'openblas'},
)
base = IndexJsonRecord(
name='python',
version='1.2.3',
build_string='2',
build_number=2,
subdir="win-32",
url="https://repo.continuum.io/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2",
)
assert base.track_features == ()
assert base.provides_features == {}
def test_requires_features(self):
rec = IndexJsonRecord(
name='austin',
version='1.2.3',
build_string='py34_2',
build_number=2,
subdir="win-32",
url="https://repo.continuum.io/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2",
features='debug nomkl',
depends=('python 2.7.*', 'numpy 1.11*'),
)
assert rec.features == ('debug', 'nomkl')
assert rec.requires_features == {'debug': 'true', 'blas': blas_value}
assert dict(rec.dump()) == dict(
name='austin',
version='1.2.3',
build='py34_2',
build_number=2,
subdir="win-32",
depends=('python 2.7.*', 'numpy 1.11*'),
constrains=(),
features='debug nomkl',
requires_features={'debug': 'true', 'blas': blas_value},
)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 15 19:59:16 2021
@author: abhay.saini
"""
import argparse
import pandas as pd
import torch
import numpy as np
import datasets
import transformers
from transformers import Trainer, TrainingArguments
import nltk
from pyarrow import csv
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
DataCollatorForSeq2Seq,
)
parser = argparse.ArgumentParser(description="""Preprocessor""")
parser.add_argument(
"--output_file_name",
action="store",
dest="output_file_name",
default="summary_generated_1.csv",
required=False,
help="""name of the output summaries file""",
)
parser.add_argument(
"--input_path",
action="store",
dest="input_path",
default="/input/wikipedia_connector/cnvrg/wiki_output_2.csv",
required=False,
help="""name of the file containing the wikipedia output""",
)
parser.add_argument(
"--default_model",
action="store",
dest="default_model",
default="./Model/bart_large_cnn_original_1/",
required=False,
help="""cnvrg trained model""",
)
parser.add_argument(
"--min_percent",
action="store",
dest="min_percent",
default="0.07",
required=False,
help="""ratio of minimum length of the summary""",
)
parser.add_argument(
"--encoder_max_length",
action="store",
dest="encoder_max_length",
default="256",
required=True,
help="""hyperparamter while training""",
)
args = parser.parse_args()
language = "english"
address_model_cnvrg = args.default_model
rows_cnt = pd.read_csv(args.input_path).shape[0]
sub1 = "train[:" + str(rows_cnt) + "]"
input_doc = datasets.load_dataset("csv", data_files=args.input_path, split=(str(sub1)))
model_cnvrg = AutoModelForSeq2SeqLM.from_pretrained(address_model_cnvrg)
output_file_name = args.output_file_name
tokenizer = AutoTokenizer.from_pretrained("Tokenizer/")
min_percent = float(args.min_percent)
encoder_max_length = int(args.encoder_max_length)
def generate_summary(test_samples, model):
outputs_1 = []
outputs_str_1 = []
for i in range(len(test_samples)):
inputs = tokenizer(
test_samples["document"][i],
padding="max_length",
truncation=True,
max_length=encoder_max_length,
return_tensors="pt",
)
print(i)
input_ids = inputs.input_ids.to(model.device)
attention_mask = inputs.attention_mask.to(model.device)
min_length_1 = min_percent * len(test_samples["document"][i])
#max_length_1 = max_percent * len(test_samples["document"][i])
outputs = model.generate(
input_ids,
attention_mask=attention_mask,
max_length=500,
min_length=round(min_length_1),
)
outputs_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
outputs_1.append(outputs)
outputs_str_1.append(outputs_str)
return outputs_1, outputs_str_1
print("defined_generate function")
def batch_tokenize_preprocess(batch, tokenizer, max_source_length, max_target_length):
source, target = batch["document"], batch["summary"]
source_tokenized = tokenizer(
source, padding="max_length", truncation=True, max_length=max_source_length
)
target_tokenized = tokenizer(
target, padding="max_length", truncation=True, max_length=max_target_length
)
batch = {k: v for k, v in source_tokenized.items()}
# Ignore padding in the loss
batch["labels"] = [
[-100 if token == tokenizer.pad_token_id else token for token in l]
for l in target_tokenized["input_ids"]
]
return batch
print("defined tokenize function")
summaries_case_0 = generate_summary(input_doc, model_cnvrg)[1]
print(summaries_case_0)
print("generated summaries")
summaries_generated = pd.DataFrame(summaries_case_0, columns=["Generated_Summary"])
print(summaries_generated)
print("created dataframe")
summaries_generated.to_csv("/cnvrg/{}".format(output_file_name), index=False)
print("outputted summaries")
|
'''Example test script.
Basic checks of device before processed
Input Variables:
args - arguments dictionary given to demo tester that may be used to
changed nature of test.
dev - Example device under test
name - Name of test being run.
results - Results map of all tests.
Test Specific Arguments:
args["hw_rev"] - hardware revision to expect
'''
expected_hw_rev = args["hw_rev"]
output_good("Welcome")
output_normal("Reading device 3.3V power rail.")
mV = dev.read_3v3_rail()
store_value("V3.3 power rail mV", mV)
threshold_check(mV, 3300, 90, "mV", "Power rail check")
output_normal("Checking device current draw.")
mA = dev.read_current()
store_value("mA draw", mA)
threshold_check(mA, 150, 10, "mA", "Power draw")
output_normal("Read hardware revision from device pull ups.")
hw_rev = dev.read_revision()
store_value("HW Rev", hw_rev)
exact_check(hw_rev, expected_hw_rev, "Hardware revision")
|
import functools
import inspect
import logging
from concurrent.futures import ProcessPoolExecutor
from fastapi import FastAPI
from fasture import models
from fasture import task_db
from fasture.plugin_mgr import get_plugin_manager
from fasture.routers import jobs, tasks
log = logging.getLogger(__name__)
executor = ProcessPoolExecutor(max_workers=2)
app = FastAPI()
@app.get("/", description="why not", include_in_schema=False)
async def root():
return {"message": "Hello World"}
app.include_router(tasks.router, prefix="/tasks", tags=["tasks"])
# app.include_router(jobs.router, prefix="/jobs", tags=["jobs"])
{"one": 11, "two": 22}
def make_future(long_running, job_name):
log.info("registering %s", job_name)
@functools.wraps(long_running)
async def wrapped(params: dict) -> dict:
log.info("Queuing %s", job_name)
future = executor.submit(long_running, params)
task = models.Task(
job_name=job_name,
future=future,
)
uid = task_db.store_task(task)
future.add_done_callback(task.future_callback)
return {"task_uid": uid}
return wrapped
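# A minimal sketch of the kind of callable make_future wraps (hypothetical;
# real jobs are discovered through the plugin manager below). The callable
# runs inside the ProcessPoolExecutor, so it must be picklable and accept a
# single params dict.
def _example_job(params: dict) -> dict:  # illustrative only, not a real plugin
    """Pretend long-running job that just echoes its parameters."""
    return {"echo": params}
# e.g. app.add_api_route("/launch/example", make_future(_example_job, "example"),
# methods=["POST"]) would queue it exactly like a discovered plugin.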
def register_dynamic_routes():
pm = get_plugin_manager()
for plugin in pm.get_plugins():
log.info(pm.get_name(plugin))
job_name = pm.get_name(plugin)
doc = plugin.fasture_job.__doc__
summary = description = ""
if doc:
summary, *description = doc.split("\n")
description = inspect.cleandoc("\n".join(description))
app.add_api_route(
f"/launch/{job_name}",
# plugin.fasture_job,
make_future(plugin.fasture_job, job_name),
summary=summary,
description=description,
methods=["POST"],
tags=["jobs"],
)
register_dynamic_routes()
@app.on_event("shutdown")
def shutdown_event():
log.info("Shutting down executor")
    executor.shutdown(wait=True)
|
import tkinter as tk
from tkinter import Menu, Tk, Text, DISABLED, RAISED,Frame, FLAT, Button, Scrollbar, Canvas, END
from tkinter import messagebox as MessageBox
from tkinter import ttk
from campo import Campo
from arbol import Arbol
import http.client
#GET method to test requests against the server
def myGET():
myConnection = http.client.HTTPConnection('localhost', 8000, timeout=10)
headers = {
"Content-type": "text/plain"
}
myConnection.request("GET", "/data/database.tytus", "", headers)
response = myConnection.getresponse()
print("Status: {} and reason: {}".format(response.status, response.reason))
myData = response.read()
print(myData.decode("utf-8") )
myConnection.close()
#POST method to test requests against the server
def myPOST():
myConnection = http.client.HTTPConnection('localhost', 8000, timeout=10)
headers = {
"Content-type": "text/plain"
}
postData = "Test http.server from http.client :D"
myConnection.request("POST", "/", postData, headers)
response = myConnection.getresponse()
print("Status: {} and reason: {}".format(response.status, response.reason))
myData = response.read()
print(myData.decode("utf-8") )
myConnection.close()
def CrearMenu(masterRoot):
    ########### menu ############
    #Create the menu bar
    barraDeMenu=Menu(masterRoot, tearoff=0,relief=FLAT, font=("Verdana", 12),activebackground='red')
    #Create the desired menus
    archivo=Menu(barraDeMenu, tearoff=0)
    #Create this menu's entries
    #The "new window" command was removed because of issues with the images
archivo.add_command(label="Nueva ventana")
archivo.add_command(label="Abrir un documento",command=abrirDoc)
archivo.add_command(label="Abrir un modelo")
archivo.add_separator()
archivo.add_command(label="Nueva Query")
archivo.add_command(label="Guardar como...")
archivo.add_command(label="Guardar")
archivo.add_separator()
archivo.add_command(label="Salir")
    #Create the Edit menu
    editar=Menu(barraDeMenu, tearoff=0)
    #Add its entries
editar.add_command(label="Cortar")
editar.add_command(label="Pegar")
editar.add_command(label="Copiar")
editar.add_separator()
editar.add_command(label="Seleccionar todo")
editar.add_command(label="Formato")
editar.add_command(label="Preferencias")
    #Add the Tools menu
    tools=Menu(barraDeMenu, tearoff=0)
    #Add its entries
    tools.add_command(label="Configuración")
    tools.add_command(label="Utilidades")
    #Temporary tools to test client-server connection
    tools.add_command(label="SELECT (GET)", command = myGET)
    tools.add_command(label="CREATE (POST)", command = myPOST)
    #Add the Help menu
    ayuda=Menu(barraDeMenu, tearoff=0)
    #Help entries
    ayuda.add_command(label="Documentación de TytuSQL")
    ayuda.add_command(label="Acerca de TytuSQL")
    #Attach the menus to the bar
    barraDeMenu.add_cascade(label="Archivo",menu=archivo)
    barraDeMenu.add_cascade(label="Editar",menu=editar)
    barraDeMenu.add_cascade(label="Herramientas",menu=tools)
    barraDeMenu.add_cascade(label="Ayuda",menu=ayuda)
    #The menu bar is returned so the window can install it
    return barraDeMenu
def abrirDoc():
MessageBox.showinfo(title="Aviso",message="Hizo clic en abrir documento")
def CrearVentana():
raiz = Tk()
    #Window configuration
    raiz.title("TytuSQL") #Set the window title
    #raiz.iconbitmap('resources/icon.ico')
    raiz.rowconfigure(0, minsize=800, weight=1)
    raiz.columnconfigure(1, minsize=800, weight=1)
    raiz.config(menu=CrearMenu(raiz), background='silver')
    #Frame for the tree view
    FrameIzquiero = Frame(raiz, relief=RAISED, bd=2)
    FrameIzquiero.pack(side="left", fill="both")
    #Instantiate the Arbol class
    Arbol(FrameIzquiero)
    #Button to send a query
    Button(raiz, text="Enviar Consulta").pack(side="top",fill="both")
    #Output console
    consola = Text(raiz)
    consola.pack(side="bottom",fill="both")
    consola.insert(1.0,"Consola de Salida")
    consola.config(state=DISABLED)
    #Text input field
    Campo(raiz).pack(side="right", fill="both", expand=True)
    ###### PANEL FOR THE TABS ########
raiz.mainloop()
def main():
CrearVentana()
if __name__ == "__main__":
main()
|
""" CISCO_ENTITY_SENSOR_MIB
The CISCO\-ENTITY\-SENSOR\-MIB is used to monitor
the values of sensors in the Entity\-MIB (RFC 2037)
entPhysicalTable.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class SensorDataScale(Enum):
"""
SensorDataScale (Enum Class)
International System of Units (SI) prefixes.
.. data:: yocto = 1
.. data:: zepto = 2
.. data:: atto = 3
.. data:: femto = 4
.. data:: pico = 5
.. data:: nano = 6
.. data:: micro = 7
.. data:: milli = 8
.. data:: units = 9
.. data:: kilo = 10
.. data:: mega = 11
.. data:: giga = 12
.. data:: tera = 13
.. data:: exa = 14
.. data:: peta = 15
.. data:: zetta = 16
.. data:: yotta = 17
"""
yocto = Enum.YLeaf(1, "yocto")
zepto = Enum.YLeaf(2, "zepto")
atto = Enum.YLeaf(3, "atto")
femto = Enum.YLeaf(4, "femto")
pico = Enum.YLeaf(5, "pico")
nano = Enum.YLeaf(6, "nano")
micro = Enum.YLeaf(7, "micro")
milli = Enum.YLeaf(8, "milli")
units = Enum.YLeaf(9, "units")
kilo = Enum.YLeaf(10, "kilo")
mega = Enum.YLeaf(11, "mega")
giga = Enum.YLeaf(12, "giga")
tera = Enum.YLeaf(13, "tera")
exa = Enum.YLeaf(14, "exa")
peta = Enum.YLeaf(15, "peta")
zetta = Enum.YLeaf(16, "zetta")
yotta = Enum.YLeaf(17, "yotta")
class SensorDataType(Enum):
"""
SensorDataType (Enum Class)
sensor measurement data types. valid values are\:
other(1)\: a measure other than those listed below
unknown(2)\: unknown measurement, or
arbitrary, relative numbers
voltsAC(3)\: electric potential
voltsDC(4)\: electric potential
amperes(5)\: electric current
watts(6)\: power
hertz(7)\: frequency
celsius(8)\: temperature
percentRH(9)\: percent relative humidity
rpm(10)\: shaft revolutions per minute
cmm(11),\: cubic meters per minute (airflow)
truthvalue(12)\: value takes { true(1), false(2) }
specialEnum(13)\: value takes user defined enumerated values
dBm(14)\: dB relative to 1mW of power
.. data:: other = 1
.. data:: unknown = 2
.. data:: voltsAC = 3
.. data:: voltsDC = 4
.. data:: amperes = 5
.. data:: watts = 6
.. data:: hertz = 7
.. data:: celsius = 8
.. data:: percentRH = 9
.. data:: rpm = 10
.. data:: cmm = 11
.. data:: truthvalue = 12
.. data:: specialEnum = 13
.. data:: dBm = 14
"""
other = Enum.YLeaf(1, "other")
unknown = Enum.YLeaf(2, "unknown")
voltsAC = Enum.YLeaf(3, "voltsAC")
voltsDC = Enum.YLeaf(4, "voltsDC")
amperes = Enum.YLeaf(5, "amperes")
watts = Enum.YLeaf(6, "watts")
hertz = Enum.YLeaf(7, "hertz")
celsius = Enum.YLeaf(8, "celsius")
percentRH = Enum.YLeaf(9, "percentRH")
rpm = Enum.YLeaf(10, "rpm")
cmm = Enum.YLeaf(11, "cmm")
truthvalue = Enum.YLeaf(12, "truthvalue")
specialEnum = Enum.YLeaf(13, "specialEnum")
dBm = Enum.YLeaf(14, "dBm")
class SensorStatus(Enum):
"""
SensorStatus (Enum Class)
Indicates the operational status of the sensor.
ok(1) means the agent can read the sensor
value.
unavailable(2) means that the agent presently
can not report the sensor value.
nonoperational(3) means that the agent believes
the sensor is broken. The sensor could have a
hard failure (disconnected wire), or a soft failure
such as out\-of\-range, jittery, or wildly fluctuating
readings.
.. data:: ok = 1
.. data:: unavailable = 2
.. data:: nonoperational = 3
"""
ok = Enum.YLeaf(1, "ok")
unavailable = Enum.YLeaf(2, "unavailable")
nonoperational = Enum.YLeaf(3, "nonoperational")
class SensorThresholdRelation(Enum):
"""
SensorThresholdRelation (Enum Class)
sensor threshold relational operator types. valid values are\:
lessThan(1)\: if the sensor value is less than
the threshold value
lessOrEqual(2)\: if the sensor value is less than or equal to
the threshold value
greaterThan(3)\: if the sensor value is greater than
the threshold value
greaterOrEqual(4)\: if the sensor value is greater than or equal
to the threshold value
equalTo(5)\: if the sensor value is equal to
the threshold value
notEqualTo(6)\: if the sensor value is not equal to
the threshold value
.. data:: lessThan = 1
.. data:: lessOrEqual = 2
.. data:: greaterThan = 3
.. data:: greaterOrEqual = 4
.. data:: equalTo = 5
.. data:: notEqualTo = 6
"""
lessThan = Enum.YLeaf(1, "lessThan")
lessOrEqual = Enum.YLeaf(2, "lessOrEqual")
greaterThan = Enum.YLeaf(3, "greaterThan")
greaterOrEqual = Enum.YLeaf(4, "greaterOrEqual")
equalTo = Enum.YLeaf(5, "equalTo")
notEqualTo = Enum.YLeaf(6, "notEqualTo")
class SensorThresholdSeverity(Enum):
"""
SensorThresholdSeverity (Enum Class)
sensor threshold severity. Valid values are\:
other(1) \: a severity other than those listed below.
minor(10) \: Minor Problem threshold.
major(20) \: Major Problem threshold.
critical(30)\: Critical problem threshold. A system might shut
down the sensor associated FRU automatically if
the sensor value reach the critical problem
threshold.
.. data:: other = 1
.. data:: minor = 10
.. data:: major = 20
.. data:: critical = 30
"""
other = Enum.YLeaf(1, "other")
minor = Enum.YLeaf(10, "minor")
major = Enum.YLeaf(20, "major")
critical = Enum.YLeaf(30, "critical")
class CISCOENTITYSENSORMIB(Entity):
"""
.. attribute:: entsensorglobalobjects
**type**\: :py:class:`Entsensorglobalobjects <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.CISCOENTITYSENSORMIB.Entsensorglobalobjects>`
.. attribute:: entsensorvaluetable
This table lists the type, scale, and present value of a sensor listed in the Entity\-MIB entPhysicalTable
**type**\: :py:class:`Entsensorvaluetable <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.CISCOENTITYSENSORMIB.Entsensorvaluetable>`
.. attribute:: entsensorthresholdtable
This table lists the threshold severity, relation, and comparison value, for a sensor listed in the Entity\-MIB entPhysicalTable
**type**\: :py:class:`Entsensorthresholdtable <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.CISCOENTITYSENSORMIB.Entsensorthresholdtable>`
"""
_prefix = 'CISCO-ENTITY-SENSOR-MIB'
_revision = '2015-01-15'
def __init__(self):
super(CISCOENTITYSENSORMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-ENTITY-SENSOR-MIB"
self.yang_parent_name = "CISCO-ENTITY-SENSOR-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("entSensorGlobalObjects", ("entsensorglobalobjects", CISCOENTITYSENSORMIB.Entsensorglobalobjects)), ("entSensorValueTable", ("entsensorvaluetable", CISCOENTITYSENSORMIB.Entsensorvaluetable)), ("entSensorThresholdTable", ("entsensorthresholdtable", CISCOENTITYSENSORMIB.Entsensorthresholdtable))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.entsensorglobalobjects = CISCOENTITYSENSORMIB.Entsensorglobalobjects()
self.entsensorglobalobjects.parent = self
self._children_name_map["entsensorglobalobjects"] = "entSensorGlobalObjects"
self._children_yang_names.add("entSensorGlobalObjects")
self.entsensorvaluetable = CISCOENTITYSENSORMIB.Entsensorvaluetable()
self.entsensorvaluetable.parent = self
self._children_name_map["entsensorvaluetable"] = "entSensorValueTable"
self._children_yang_names.add("entSensorValueTable")
self.entsensorthresholdtable = CISCOENTITYSENSORMIB.Entsensorthresholdtable()
self.entsensorthresholdtable.parent = self
self._children_name_map["entsensorthresholdtable"] = "entSensorThresholdTable"
self._children_yang_names.add("entSensorThresholdTable")
self._segment_path = lambda: "CISCO-ENTITY-SENSOR-MIB:CISCO-ENTITY-SENSOR-MIB"
class Entsensorglobalobjects(Entity):
"""
.. attribute:: entsensorthreshnotifglobalenable
This variable enables the generation of entSensorThresholdNotification globally on the device. If this object value is 'false', then no entSensorThresholdNotification will be generated on this device. If this object value is 'true', then whether a entSensorThresholdNotification for a threshold will be generated or not depends on the instance value of entSensorThresholdNotificationEnable for that threshold
**type**\: bool
"""
_prefix = 'CISCO-ENTITY-SENSOR-MIB'
_revision = '2015-01-15'
def __init__(self):
super(CISCOENTITYSENSORMIB.Entsensorglobalobjects, self).__init__()
self.yang_name = "entSensorGlobalObjects"
self.yang_parent_name = "CISCO-ENTITY-SENSOR-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('entsensorthreshnotifglobalenable', YLeaf(YType.boolean, 'entSensorThreshNotifGlobalEnable')),
])
self.entsensorthreshnotifglobalenable = None
self._segment_path = lambda: "entSensorGlobalObjects"
self._absolute_path = lambda: "CISCO-ENTITY-SENSOR-MIB:CISCO-ENTITY-SENSOR-MIB/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(CISCOENTITYSENSORMIB.Entsensorglobalobjects, ['entsensorthreshnotifglobalenable'], name, value)
class Entsensorvaluetable(Entity):
"""
This table lists the type, scale, and present value
of a sensor listed in the Entity\-MIB entPhysicalTable.
.. attribute:: entsensorvalueentry
An entSensorValueTable entry describes the present reading of a sensor, the measurement units and scale, and sensor operational status
**type**\: list of :py:class:`Entsensorvalueentry <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.CISCOENTITYSENSORMIB.Entsensorvaluetable.Entsensorvalueentry>`
"""
_prefix = 'CISCO-ENTITY-SENSOR-MIB'
_revision = '2015-01-15'
def __init__(self):
super(CISCOENTITYSENSORMIB.Entsensorvaluetable, self).__init__()
self.yang_name = "entSensorValueTable"
self.yang_parent_name = "CISCO-ENTITY-SENSOR-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("entSensorValueEntry", ("entsensorvalueentry", CISCOENTITYSENSORMIB.Entsensorvaluetable.Entsensorvalueentry))])
self._leafs = OrderedDict()
self.entsensorvalueentry = YList(self)
self._segment_path = lambda: "entSensorValueTable"
self._absolute_path = lambda: "CISCO-ENTITY-SENSOR-MIB:CISCO-ENTITY-SENSOR-MIB/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(CISCOENTITYSENSORMIB.Entsensorvaluetable, [], name, value)
class Entsensorvalueentry(Entity):
"""
An entSensorValueTable entry describes the
present reading of a sensor, the measurement units
and scale, and sensor operational status.
.. attribute:: entphysicalindex (key)
**type**\: int
**range:** 1..2147483647
**refers to**\: :py:class:`entphysicalindex <ydk.models.cisco_ios_xe.ENTITY_MIB.ENTITYMIB.Entphysicaltable.Entphysicalentry>`
.. attribute:: entsensortype
This variable indicates the type of data reported by the entSensorValue. This variable is set by the agent at start\-up and the value does not change during operation
**type**\: :py:class:`SensorDataType <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.SensorDataType>`
.. attribute:: entsensorscale
This variable indicates the exponent to apply to sensor values reported by entSensorValue. This variable is set by the agent at start\-up and the value does not change during operation
**type**\: :py:class:`SensorDataScale <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.SensorDataScale>`
.. attribute:: entsensorprecision
This variable indicates the number of decimal places of precision in fixed\-point sensor values reported by entSensorValue. This variable is set to 0 when entSensorType is not a fixed\-point type\: e.g.'percentRH(9)', 'rpm(10)', 'cmm(11)', or 'truthvalue(12)'. This variable is set by the agent at start\-up and the value does not change during operation
**type**\: int
**range:** \-8..9
.. attribute:: entsensorvalue
This variable reports the most recent measurement seen by the sensor. To correctly display or interpret this variable's value, you must also know entSensorType, entSensorScale, and entSensorPrecision. However, you can compare entSensorValue with the threshold values given in entSensorThresholdTable without any semantic knowledge
**type**\: int
**range:** \-1000000000..1073741823
.. attribute:: entsensorstatus
This variable indicates the present operational status of the sensor
**type**\: :py:class:`SensorStatus <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.SensorStatus>`
.. attribute:: entsensorvaluetimestamp
This variable indicates the age of the value reported by entSensorValue
**type**\: int
**range:** 0..4294967295
.. attribute:: entsensorvalueupdaterate
This variable indicates the rate that the agent updates entSensorValue
**type**\: int
**range:** 0..999999999
**units**\: seconds
.. attribute:: entsensormeasuredentity
This object identifies the physical entity for which the sensor is taking measurements. For example, for a sensor measuring the voltage output of a power\-supply, this object would be the entPhysicalIndex of that power\-supply; for a sensor measuring the temperature inside one chassis of a multi\-chassis system, this object would be the enPhysicalIndex of that chassis. This object has a value of zero when the physical entity for which the sensor is taking measurements can not be represented by any one row in the entPhysicalTable, or that there is no such physical entity
**type**\: int
**range:** 0..2147483647
"""
_prefix = 'CISCO-ENTITY-SENSOR-MIB'
_revision = '2015-01-15'
def __init__(self):
super(CISCOENTITYSENSORMIB.Entsensorvaluetable.Entsensorvalueentry, self).__init__()
self.yang_name = "entSensorValueEntry"
self.yang_parent_name = "entSensorValueTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['entphysicalindex']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('entphysicalindex', YLeaf(YType.str, 'entPhysicalIndex')),
('entsensortype', YLeaf(YType.enumeration, 'entSensorType')),
('entsensorscale', YLeaf(YType.enumeration, 'entSensorScale')),
('entsensorprecision', YLeaf(YType.int32, 'entSensorPrecision')),
('entsensorvalue', YLeaf(YType.int32, 'entSensorValue')),
('entsensorstatus', YLeaf(YType.enumeration, 'entSensorStatus')),
('entsensorvaluetimestamp', YLeaf(YType.uint32, 'entSensorValueTimeStamp')),
('entsensorvalueupdaterate', YLeaf(YType.int32, 'entSensorValueUpdateRate')),
('entsensormeasuredentity', YLeaf(YType.int32, 'entSensorMeasuredEntity')),
])
self.entphysicalindex = None
self.entsensortype = None
self.entsensorscale = None
self.entsensorprecision = None
self.entsensorvalue = None
self.entsensorstatus = None
self.entsensorvaluetimestamp = None
self.entsensorvalueupdaterate = None
self.entsensormeasuredentity = None
self._segment_path = lambda: "entSensorValueEntry" + "[entPhysicalIndex='" + str(self.entphysicalindex) + "']"
self._absolute_path = lambda: "CISCO-ENTITY-SENSOR-MIB:CISCO-ENTITY-SENSOR-MIB/entSensorValueTable/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(CISCOENTITYSENSORMIB.Entsensorvaluetable.Entsensorvalueentry, ['entphysicalindex', 'entsensortype', 'entsensorscale', 'entsensorprecision', 'entsensorvalue', 'entsensorstatus', 'entsensorvaluetimestamp', 'entsensorvalueupdaterate', 'entsensormeasuredentity'], name, value)
class Entsensorthresholdtable(Entity):
"""
This table lists the threshold severity, relation, and
comparison value, for a sensor listed in the Entity\-MIB
entPhysicalTable.
.. attribute:: entsensorthresholdentry
An entSensorThresholdTable entry describes the thresholds for a sensor\: the threshold severity, the threshold value, the relation, and the evaluation of the threshold. Only entities of type sensor(8) are listed in this table. Only pre\-configured thresholds are listed in this table. Users can create sensor\-value monitoring instruments in different ways, such as RMON alarms, Expression\-MIB, etc. Entries are created by the agent at system startup and FRU insertion. Entries are deleted by the agent at FRU removal
**type**\: list of :py:class:`Entsensorthresholdentry <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.CISCOENTITYSENSORMIB.Entsensorthresholdtable.Entsensorthresholdentry>`
"""
_prefix = 'CISCO-ENTITY-SENSOR-MIB'
_revision = '2015-01-15'
def __init__(self):
super(CISCOENTITYSENSORMIB.Entsensorthresholdtable, self).__init__()
self.yang_name = "entSensorThresholdTable"
self.yang_parent_name = "CISCO-ENTITY-SENSOR-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("entSensorThresholdEntry", ("entsensorthresholdentry", CISCOENTITYSENSORMIB.Entsensorthresholdtable.Entsensorthresholdentry))])
self._leafs = OrderedDict()
self.entsensorthresholdentry = YList(self)
self._segment_path = lambda: "entSensorThresholdTable"
self._absolute_path = lambda: "CISCO-ENTITY-SENSOR-MIB:CISCO-ENTITY-SENSOR-MIB/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(CISCOENTITYSENSORMIB.Entsensorthresholdtable, [], name, value)
class Entsensorthresholdentry(Entity):
"""
An entSensorThresholdTable entry describes the
thresholds for a sensor\: the threshold severity,
the threshold value, the relation, and the
evaluation of the threshold.
Only entities of type sensor(8) are listed in this table.
Only pre\-configured thresholds are listed in this table.
Users can create sensor\-value monitoring instruments
in different ways, such as RMON alarms, Expression\-MIB, etc.
Entries are created by the agent at system startup and
FRU insertion. Entries are deleted by the agent at
FRU removal.
.. attribute:: entphysicalindex (key)
**type**\: int
**range:** 1..2147483647
**refers to**\: :py:class:`entphysicalindex <ydk.models.cisco_ios_xe.ENTITY_MIB.ENTITYMIB.Entphysicaltable.Entphysicalentry>`
.. attribute:: entsensorthresholdindex (key)
An index that uniquely identifies an entry in the entSensorThresholdTable. This index permits the same sensor to have several different thresholds
**type**\: int
**range:** 1..99999999
.. attribute:: entsensorthresholdseverity
This variable indicates the severity of this threshold
**type**\: :py:class:`SensorThresholdSeverity <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.SensorThresholdSeverity>`
.. attribute:: entsensorthresholdrelation
This variable indicates the relation between sensor value (entSensorValue) and threshold value (entSensorThresholdValue), required to trigger the alarm. when evaluating the relation, entSensorValue is on the left of entSensorThresholdRelation, entSensorThresholdValue is on the right. in pseudo\-code, the evaluation\-alarm mechanism is\: ... if (entSensorStatus == ok) then if (evaluate(entSensorValue, entSensorThresholdRelation, entSensorThresholdValue)) then if (entSensorThresholdNotificationEnable == true)) then raise\_alarm(sensor's entPhysicalIndex); endif endif endif ..
**type**\: :py:class:`SensorThresholdRelation <ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB.SensorThresholdRelation>`
.. attribute:: entsensorthresholdvalue
This variable indicates the value of the threshold. To correctly display or interpret this variable's value, you must also know entSensorType, entSensorScale, and entSensorPrecision. However, you can directly compare entSensorValue with the threshold values given in entSensorThresholdTable without any semantic knowledge
**type**\: int
**range:** \-1000000000..1073741823
.. attribute:: entsensorthresholdevaluation
This variable indicates the result of the most recent evaluation of the threshold. If the threshold condition is true, entSensorThresholdEvaluation is true(1). If the threshold condition is false, entSensorThresholdEvaluation is false(2). Thresholds are evaluated at the rate indicated by entSensorValueUpdateRate
**type**\: bool
.. attribute:: entsensorthresholdnotificationenable
This variable controls generation of entSensorThresholdNotification for this threshold. When this variable is 'true', generation of entSensorThresholdNotification is enabled for this threshold. When this variable is 'false', generation of entSensorThresholdNotification is disabled for this threshold
**type**\: bool
"""
_prefix = 'CISCO-ENTITY-SENSOR-MIB'
_revision = '2015-01-15'
def __init__(self):
super(CISCOENTITYSENSORMIB.Entsensorthresholdtable.Entsensorthresholdentry, self).__init__()
self.yang_name = "entSensorThresholdEntry"
self.yang_parent_name = "entSensorThresholdTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['entphysicalindex','entsensorthresholdindex']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('entphysicalindex', YLeaf(YType.str, 'entPhysicalIndex')),
('entsensorthresholdindex', YLeaf(YType.int32, 'entSensorThresholdIndex')),
('entsensorthresholdseverity', YLeaf(YType.enumeration, 'entSensorThresholdSeverity')),
('entsensorthresholdrelation', YLeaf(YType.enumeration, 'entSensorThresholdRelation')),
('entsensorthresholdvalue', YLeaf(YType.int32, 'entSensorThresholdValue')),
('entsensorthresholdevaluation', YLeaf(YType.boolean, 'entSensorThresholdEvaluation')),
('entsensorthresholdnotificationenable', YLeaf(YType.boolean, 'entSensorThresholdNotificationEnable')),
])
self.entphysicalindex = None
self.entsensorthresholdindex = None
self.entsensorthresholdseverity = None
self.entsensorthresholdrelation = None
self.entsensorthresholdvalue = None
self.entsensorthresholdevaluation = None
self.entsensorthresholdnotificationenable = None
self._segment_path = lambda: "entSensorThresholdEntry" + "[entPhysicalIndex='" + str(self.entphysicalindex) + "']" + "[entSensorThresholdIndex='" + str(self.entsensorthresholdindex) + "']"
self._absolute_path = lambda: "CISCO-ENTITY-SENSOR-MIB:CISCO-ENTITY-SENSOR-MIB/entSensorThresholdTable/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(CISCOENTITYSENSORMIB.Entsensorthresholdtable.Entsensorthresholdentry, ['entphysicalindex', 'entsensorthresholdindex', 'entsensorthresholdseverity', 'entsensorthresholdrelation', 'entsensorthresholdvalue', 'entsensorthresholdevaluation', 'entsensorthresholdnotificationenable'], name, value)
def clone_ptr(self):
self._top_entity = CISCOENTITYSENSORMIB()
return self._top_entity
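# A minimal read sketch (an assumption, not part of the generated bindings):
# fetching the sensor value table from a device over NETCONF with YDK's CRUD
# service. The host address and credentials are placeholders.
if __name__ == "__main__":  # illustrative only
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(
        address="10.0.0.1", username="admin", password="admin")
    mib = CRUDService().read(provider, CISCOENTITYSENSORMIB())
    for entry in mib.entsensorvaluetable.entsensorvalueentry:
        print(entry.entphysicalindex, entry.entsensorvalue)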
|
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
SETTINGS = None
def load_settings():
global SETTINGS
try:
SETTINGS = settings.BASE_BACKEND
except AttributeError:
SETTINGS = {}
load_settings()
def get_password_reset_table():
"""
Return the Password Reset model that is active in this project.
"""
if SETTINGS.get("USE_BASE_BACKEND_RESET_PASSWORD_TABLE", False):
return django_apps.get_model("base_backend.PasswordReset", require_ready=False)
else:
try:
return django_apps.get_model(SETTINGS.get("PASSWORD_RESET_TABLE", None), require_ready=False)
        except ValueError:
            raise ImproperlyConfigured("PASSWORD_RESET_TABLE must be of the form 'app_label.model_name'")
        except LookupError:
            raise ImproperlyConfigured(
                "PASSWORD_RESET_TABLE refers to model '%s' that has not been installed"
                % SETTINGS.get("PASSWORD_RESET_TABLE"))
def get_otp_verification_table():
"""
Return the OTP Verification model that is active in this project.
"""
if SETTINGS.get("USE_BASE_BACKEND_OTP_TABLE", False):
return django_apps.get_model("base_backend.SmsVerification", require_ready=False)
else:
try:
return django_apps.get_model(SETTINGS.get("PHONE_VERIFICATION_OTP_TABLE", None), require_ready=False)
except ValueError:
raise ImproperlyConfigured("PHONE_VERIFICATION_OTP_TABLE must be of the form 'app_label.model_name'")
except LookupError:
            raise ImproperlyConfigured(
                "PHONE_VERIFICATION_OTP_TABLE refers to model '%s' that has not been installed"
                % SETTINGS.get("PHONE_VERIFICATION_OTP_TABLE"))
def get_send_sms_function():
"""
return the implemented function for sending sms, it should accept the named params:
phone: Phone number.
message: A message.
"""
if SETTINGS.get("SEND_SMS_FUNC", None):
        return SETTINGS["SEND_SMS_FUNC"]
else:
raise ImproperlyConfigured("you are trying to send an sms, but the SEND_SMS_FUNC is not provided in the"
" settings.")
|
# -*- coding: utf-8
from django.apps import AppConfig
class DjangoGrpcConfig(AppConfig):
name = 'django_grpc'
verbose_name = 'Django gRPC server'
|
from datetime import datetime, timedelta
from babel.dates import format_timedelta, format_date
from collections import namedtuple
import pandas as pd
FeedparserTime = namedtuple(
"FeedparserTime",
[
"tm_year",
"tm_mon",
"tm_mday",
"tm_hour",
"tm_min",
"tm_sec",
"tm_wday",
"tm_yday",
"tm_isdst",
],
)
def pretty_format_date(date: datetime) -> str:
delta = datetime.now() - date
if delta < timedelta(days=7):
return format_timedelta(delta, locale="cs_CZ")
elif date > datetime(year=datetime.now().year, month=1, day=1):
return format_date(date, "d. MMMM", locale="cs_CZ")
else:
return format_date(date, "d. MMMM y", locale="cs_CZ")
def format_articles(selected_articles: pd.DataFrame) -> pd.DataFrame:
# TODO: resolve feedparser time format elsewhere
selected_articles["published"] = selected_articles["published"].map(
pretty_format_date
)
return selected_articles
|
#!/bin/python
#import numpy
import os
from sklearn.svm import SVC
import cPickle
import sys
import pandas as pd
import numpy as np
# Trains an SVM classifier and saves the model to a local file
if __name__ == '__main__':
if len(sys.argv) != 5:
print "Usage: {0} event_name feat_dir feat_dim output_file".format(sys.argv[0])
print "event_name -- name of the event (P001, P002 or P003 in Homework 1)"
print "feat_dir -- dir of feature files"
print "feat_dim -- dim of features"
print "output_file -- path to save the svm model"
exit(1)
event_name = sys.argv[1]
feat_dir = sys.argv[2]
feat_dim = int(sys.argv[3])
output_file = sys.argv[4]
###########################################
feature = pd.read_csv(feat_dir, header=None, delimiter = ' ')
file_list = '/home/ubuntu/11775-hws/hw1_code/list/all.video'
trn_list = '/home/ubuntu/11775-hws/all_trn.lst'
c = pd.read_csv(trn_list, header=None, delimiter = ' ')
# c = c.replace('P001',0)
# c = c.replace('P002',1)
# c = c.replace('P003',2)
# c = c.fillna(4)
# y_train = c.iloc[:,1]
y_train = pd.get_dummies(c.iloc[:,1], dummy_na = True)
# y_train = np.asarray(y_train)
if event_name == 'P001':
y_train = y_train.iloc[:,0]
elif event_name == 'P002':
y_train = y_train.iloc[:,1]
else :
y_train = y_train.iloc[:,2]
trn_list = c.iloc[:,0]
trn_list = np.asarray(trn_list)
# print(trn_list)
f = open(file_list, "r")
idx = 0
idx_list = []
for line in f.readlines():
if line.replace('\n','') in list(trn_list):
# print('yes')
idx_list.append(idx)
idx += 1
f.close()
x_train = feature.iloc[idx_list, :]
print('x_train', x_train.shape[0], x_train.shape[1])
print('y_train', y_train.shape[0])
svm = SVC(C=0.001, gamma = 0.0001, probability=True)
svm.fit(x_train, y_train)
cPickle.dump(svm, open(output_file,"wb"), cPickle.HIGHEST_PROTOCOL)
##############################
print 'SVM trained successfully for event %s!' % (event_name)
|
# MenuTitle: Round All Hint values
# -*- coding: utf-8 -*-
__doc__ = """
Rounds the hints to a whole number for all glyphs in the font.
"""
for g in Glyphs.font.glyphs:
for l in g.layers:
for h in l.hints:
h.position = round(h.position)
h.width = round(h.width)
if h.originNode:
h.originNode.position = NSPoint(round(h.originNode.position.x), round(h.originNode.position.y))
if h.targetNode:
h.targetNode.position = NSPoint(round(h.targetNode.position.x), round(h.targetNode.position.y))
if h.otherNode1:
h.otherNode1.position = NSPoint(round(h.otherNode1.position.x), round(h.otherNode1.position.y))
if h.otherNode2:
h.otherNode2.position = NSPoint(round(h.otherNode2.position.x), round(h.otherNode2.position.y))
|
import os
import inspect
from functools import partial
from datasets.voc_sbd import VOCSBDDataset
from datasets.seg_transforms import ConstantPad, ToTensor, Normalize
from test import main
if __name__ == '__main__':
project_dir = os.path.dirname(inspect.getabsfile(main))
exp_name = os.path.splitext(os.path.basename(__file__))[0] # Make sure the config and model have the same base name
exp_dir = os.path.join('tests', exp_name)
model = os.path.join('weights', exp_name + '.pth')
data_dir = 'data/vocsbd' # The dataset will be downloaded automatically
test_dataset = partial(VOCSBDDataset, data_dir, 'val')
img_transforms = [ConstantPad(512, lbl_fill=255)]
tensor_transforms = [ToTensor(), Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
os.chdir(project_dir)
os.makedirs(exp_dir, exist_ok=True)
main(exp_dir, model=model, test_dataset=test_dataset, img_transforms=img_transforms,
tensor_transforms=tensor_transforms, forced=True)
|
#!/usr/bin/env python
# coding=utf-8
"""Diksiyonaryo CLI (https://github.com/njncalub/diksiyonaryo-ph).
[ASCII-art banner: "diksiyonaryo"]
Usage:
diksiyonaryo.py [options] (init | drop)
diksiyonaryo.py [options] fetch [<letter>]
diksiyonaryo.py [options] define <word>
diksiyonaryo.py [options] search <query>
diksiyonaryo.py [options] run [<host> <port>]
diksiyonaryo.py [options] shell
diksiyonaryo.py (-h | --help)
diksiyonaryo.py (-v | --version)
diksiyonaryo.py test
Options:
--settings=<file> Use a different settings file
[default: config.settings.local].
--start=<page> When fetching, specify which page to start at
[default: 0].
--end=<page> When fetching, specify which page to end.
--max-pages=<max> Set an upper limit on how many pages the scraper will
fetch, per letter.
--from=<letter> When fetching all letters, specify a letter to start at.
--to=<letter> When fetching all letters, specify a letter to end at.
--debug Force debug mode.
-q, --quiet Decrease amount of text shown [default: False].
-h, --help Show this help message and exit.
-v, --version Show version and exit.
"""
from docopt import docopt
from app import DiksiyonaryoApp, __version__
from utils.settings import load_settings
if __name__ == '__main__':
args = docopt(__doc__, version=__version__)
settings = load_settings(filename=args['--settings'])
app = DiksiyonaryoApp(settings=settings, is_quiet=args['--quiet'])
if args['init']:
app.run_init_db()
elif args['drop']:
app.run_drop_db()
elif args['fetch']:
if args['<letter>']:
app.run_fetch_letter(letter=args['<letter>'],
max_pages=args['--max-pages'],
start=args['--start'], end=args['--end'])
else:
app.run_fetch_all(start=args['--start'], end=args['--end'],
from_letter=args['--from'],
to_letter=args['--to'])
elif args['define']:
app.run_define(word=args['<word>'])
elif args['search']:
app.run_search(query=args['<query>'])
elif args['test']:
app.run_test()
elif args['run']:
options = {
'host': args['<host>'] or settings.API_SERVER_HOST,
'port': args['<port>'] or settings.API_SERVER_PORT,
'debug': args['--debug'] or settings.API_SERVER_DEBUG,
}
app.run_server(**options)
elif args['shell']:
app.run_shell()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-07 18:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(blank=True, max_length=200, null=True)),
('eslogan', models.CharField(blank=True, max_length=600, null=True)),
('imagen_logo', models.CharField(blank=True, max_length=3600, null=True)),
('video', models.CharField(blank=True, max_length=3600, null=True)),
('mision', models.CharField(blank=True, max_length=2000, null=True)),
('vision', models.CharField(blank=True, max_length=2000, null=True)),
('acerca', models.CharField(blank=True, max_length=2000, null=True)),
],
options={
'db_table': 'admin',
'managed': False,
},
),
migrations.CreateModel(
name='Bitacora',
fields=[
('id_bit', models.BigIntegerField(primary_key=True, serialize=False)),
('fecha', models.DateField(blank=True, null=True)),
],
options={
'db_table': 'bitacora',
'managed': False,
},
),
migrations.CreateModel(
name='Chat',
fields=[
('id_chat', models.BigIntegerField(primary_key=True, serialize=False)),
('status', models.CharField(blank=True, max_length=1, null=True)),
('usuario_id_admin', models.BigIntegerField(blank=True, null=True)),
('usuario_id_user', models.BigIntegerField(blank=True, null=True)),
],
options={
'db_table': 'chat',
'managed': False,
},
),
migrations.CreateModel(
name='Elemento',
fields=[
('id_elemento', models.BigIntegerField(primary_key=True, serialize=False)),
('nombre', models.CharField(blank=True, max_length=100, null=True)),
('tipo', models.CharField(blank=True, max_length=1, null=True)),
('data', models.CharField(blank=True, max_length=3600, null=True)),
('permisos', models.BigIntegerField(blank=True, null=True)),
],
options={
'db_table': 'elemento',
'managed': False,
},
),
migrations.CreateModel(
name='Grupo',
fields=[
('id_grupo', models.BigIntegerField(primary_key=True, serialize=False)),
('nombre', models.CharField(blank=True, max_length=80, null=True)),
],
options={
'db_table': 'grupo',
'managed': False,
},
),
migrations.CreateModel(
name='Mensaje',
fields=[
('id_mensaje', models.BigIntegerField(primary_key=True, serialize=False)),
('emisor', models.CharField(blank=True, max_length=200, null=True)),
('fecha', models.DateField(blank=True, null=True)),
('mensaje', models.CharField(blank=True, max_length=2000, null=True)),
],
options={
'db_table': 'mensaje',
'managed': False,
},
),
migrations.CreateModel(
name='Usuario',
fields=[
('id_usuario', models.BigIntegerField(primary_key=True, serialize=False)),
('nombre', models.CharField(blank=True, max_length=200, null=True)),
('apellido', models.CharField(blank=True, max_length=200, null=True)),
('telefono', models.BigIntegerField(blank=True, null=True)),
('direccion', models.CharField(blank=True, max_length=600, null=True)),
('clave', models.CharField(blank=True, max_length=32, null=True)),
('correo', models.CharField(blank=True, max_length=100, null=True)),
('foto', models.CharField(blank=True, max_length=3600, null=True)),
('genero', models.CharField(blank=True, max_length=40, null=True)),
('fecha_nacimiento', models.DateField(blank=True, null=True)),
('fecha_registro', models.DateField(blank=True, null=True)),
('status', models.CharField(blank=True, max_length=1, null=True)),
('status_cuenta', models.CharField(blank=True, max_length=40, null=True)),
],
options={
'db_table': 'usuario',
'managed': False,
},
),
migrations.CreateModel(
name='UsuarioGrupo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'usuario_grupo',
'managed': False,
},
),
]
|
import sys
from direct.showbase.ShowBase import ShowBase
from pandac.PandaModules import *
class MyApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
# quit when esc is pressed
self.accept('escape',sys.exit)
base.disableMouse()
# load the box model
box = self.loader.loadModel("models/box")
box.reparentTo(render)
box.setScale(2.0, 2.0, 2.0)
box.setPos(8, 50, 0)
panda = base.loader.loadModel("models/panda")
panda.reparentTo(render)
panda.setPos(0, 10, 0)
panda.setScale(0.1, 0.1, 0.1)
cNodePanda = panda.attachNewNode(CollisionNode('cnode_panda'))
cNodePanda.node().addSolid(CollisionSphere(0,0,5,5))
cNodePanda.show()
# CollisionTraverser and a Collision Handler is set up
self.picker = CollisionTraverser()
self.picker.showCollisions(render)
self.pq = CollisionHandlerQueue()
self.pickerNode = CollisionNode('mouseRay')
self.pickerNP = camera.attachNewNode(self.pickerNode)
self.pickerNode.setFromCollideMask(BitMask32.bit(1))
box.setCollideMask(BitMask32.bit(1))
panda.setCollideMask(BitMask32.bit(1))
self.pickerRay = CollisionRay()
self.pickerNode.addSolid(self.pickerRay)
self.picker.addCollider(self.pickerNP,self.pq)
self.accept("mouse1",self.mouseClick)
def mouseClick(self):
print('mouse click')
# check if we have access to the mouse
if base.mouseWatcherNode.hasMouse():
# get the mouse position
mpos = base.mouseWatcherNode.getMouse()
# set the position of the ray based on the mouse position
self.pickerRay.setFromLens(base.camNode,mpos.getX(),mpos.getY())
self.picker.traverse(render)
# if we have hit something sort the hits so that the closest is first and highlight the node
if self.pq.getNumEntries() > 0:
self.pq.sortEntries()
pickedObj = self.pq.getEntry(0).getIntoNodePath()
print('click on ' + pickedObj.getName())
app = MyApp()
app.run()
|
"""
2 Beautiful Matrix - https://codeforces.com/problemset/problem/263/A
"""
l = []
for i in range(5):
l.append(list(map(int, input().split())))
# print(l)
for i in range(len(l)):
for j in range(len(l[0])):
if l[i][j] == 1:
x, y = i+1, j+1
break
print(abs(x - 3) + abs(y - 3))
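# Worked example: if the lone 1 sits at row 2, column 5 (1-indexed), the move
# count is |2 - 3| + |5 - 3| = 1 + 2 = 3.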
|
from .menu import *
from .stage import *
from .cutscene import *
from .director import Director |
import sys
import unittest
import zeit.content.article.edit.browser.testing
class DivisionBlockTest(
zeit.content.article.edit.browser.testing.EditorTestCase):
@unittest.skipIf(
sys.platform == 'darwin',
'*testing* focus does not work under OSX for reasons unknown')
def test_division_is_focused_after_add(self):
s = self.selenium
self.add_article()
block_id = self.create_block('division')
s.waitForElementPresent('css=#' + block_id + ' textarea:focus')
|
""" $Id: SOAP.py,v 1.2 2000/03/13 22:32:35 kmacleod Exp $
SOAP.py implements the Simple Object Access Protocol
<http://develop.com/SOAP/>
"""
from StringIO import StringIO
from ScarabMarshal import *
from xml.sax import saxlib, saxexts
from types import *
import string
import base64
# just constants
DICT = "dict"
ARRAY = "array"
CHAR = "char"
class SOAPMarshaler(Marshaler):
def __init__(self, stream):
self.written_stream_header = 0
self.write = stream.write
self.flush = stream.flush
def encode_call(self, method, map):
dict = { 'id':0 }
self.m_init(dict)
self.write('''<SOAP:Envelope
xmlns:SOAP="urn:schemas-xmlsoap-org:soap.v1"
xmlns:tbd="ToBeDetermined">
<SOAP:Body>
''')
self._marshal({ method : map }, dict)
self.write(''' </SOAP:Body>
</SOAP:Envelope>
''')
self.m_finish(dict)
def encode_response(self, method, map):
dict = { 'id':0 }
self.m_init(dict)
self.write('''<SOAP:Envelope
xmlns:SOAP="urn:schemas-xmlsoap-org:soap.v1"
xmlns:tbd="ToBeDetermined">
<SOAP:Body>
''')
self._marshal({ method + "Response" : map }, dict)
self.write(''' </SOAP:Body>
</SOAP:Envelope>
''')
self.m_finish(dict)
def encode_fault(self, faultcode, message, runcode, detail = None):
dict = { 'id':0 }
self.m_init(dict)
self.write('''<SOAP:Envelope
xmlns:SOAP="urn:schemas-xmlsoap-org:soap.v1"
xmlns:tbd="ToBeDetermined">
<SOAP:Body>
''')
        if detail is None:
self._marshal({ "SOAP:Fault" : { 'faultcode' : faultcode,
'faultstring' : message,
'runcode' : runcode } },
dict)
else:
self._marshal({ "SOAP:Fault" : { 'faultcode' : faultcode,
'faultstring' : message,
'runcode' : runcode,
'detail' : detail } },
dict)
self.write(''' </SOAP:Body>
</SOAP:Envelope>
''')
self.m_finish(dict)
def m_init(self, dict):
self.write('<?xml version="1.0"?>')
def m_finish(self, dict):
self.write("\n\f\n")
def persistent_id(self, object):
return None
def m_reference(self, object, dict):
""" already done """
def m_None(self, object, dict):
""" already done """
def m_int(self, object, dict):
self.write(str(object))
def m_long(self, object, dict):
self.write(str(object))
def m_float(self, object, dict):
self.write(str(object))
def m_complex(self, object, dict):
self.write(str(object))
def m_string(self, object, dict):
self.write(str(object))
def m_list(self, object, dict):
# FIXME missing ref ids
n = len(object)
for k in range(n):
type = self._type(object[k])
self.write('<tbd:urtype' + type + '>')
self._marshal(object[k], dict)
self.write('</tbd:urtype>')
def m_tuple(self, object, dict):
# FIXME missing ref ids
# FIXME set type to tuple
n = len(object)
for k in range(n):
type = self._type(object[k])
self.write('<tbd:urtype' + type + '>')
self._marshal(object[k], dict)
self.write('</tbd:urtype>')
def m_dictionary(self, object, dict):
# FIXME missing ref ids
items = object.items()
n = len(items)
for k in range(n):
key, value = items[k]
type = self._type(value)
self.write('<' + key + type + '>')
self._marshal(value, dict)
self.write('</' + key + '>')
def m_instance(self, object, dict):
# FIXME missing ref ids
cls = object.__class__
module = whichmodule(cls)
name = cls.__name__
try:
getstate = object.__getstate__
except AttributeError:
stuff = object.__dict__
else:
stuff = getstate()
items = stuff.items()
n = len(items)
for k in range(n):
key, value = items[k]
type = self._type(value)
self.write('<' + key + type + '>')
self._marshal(value, dict)
self.write('</' + key + '>')
def _type(self, object):
t = type(object)
if t == ListType:
return ' SOAP:arrayType="tbd:urtype[]"'
elif t == TupleType:
return ' SOAP:arrayType="tbd:urtype[]"'
elif t == NoneType:
return ' xsi:null="1"'
return ''
class SOAPUnmarshaler(Unmarshaler, saxlib.DocumentHandler):
def __init__(self, stream):
self.memo = {}
self.stream = stream
def _unmarshal(self):
self.parse_value_stack = [ {} ]
self.parse_utype_stack = [ DICT ]
self.parse_type_stack = [ ]
self.parser = saxexts.make_parser()
self.parser.setDocumentHandler(self)
self.parser.setErrorHandler(self)
lines = []
stream = self.stream
# FIXME SAX parsers should support this on streams
line = stream.readline()
while (line != "\f\n") and (line != ""):
lines.append(line)
line = stream.readline()
if len(lines) == 0:
raise EOFError
        stream = StringIO(string.join(lines, ''))  # lines keep their own newlines
self.parser.parseFile(stream)
o = self.parse_value_stack[0]
delattr(self, 'parse_value_stack')
self.parser.close()
return o
def startElement(self, name, attrs):
self.chars = ""
xsi_type = None
if attrs.has_key('xsi:type'):
xsi_type = attrs['xsi:type']
elif (attrs.has_key('xsi:null')
and attrs['xsi:null'] == '1'):
xsi_type = "None"
self.parse_type_stack.append(xsi_type)
# FIXME 'list' is a temp hack for a specific user
if (attrs.has_key('SOAP:arrayType')
or (attrs.has_key('type')
and attrs['type'] == 'list')):
self.parse_utype_stack.append(ARRAY)
self.parse_value_stack.append( [ ] )
else:
# will be set to DICT if a sub-element is found
self.parse_utype_stack.append(CHAR)
def endElement(self, name):
# FIXME do something with types
xsi_type = self.parse_type_stack.pop()
utype = self.parse_utype_stack.pop()
if utype is CHAR:
if xsi_type == "None":
value = None
else:
value = self.chars
else:
value = self.parse_value_stack.pop()
# if we're in an element, and our parent element was defaulted
# to CHAR, then we're in a struct and we need to create that
# dictionary.
if self.parse_utype_stack[-1] is CHAR:
self.parse_value_stack.append( { } )
self.parse_utype_stack[-1] = DICT
if self.parse_utype_stack[-1] is DICT:
self.parse_value_stack[-1][name] = value
else:
self.parse_value_stack[-1].append(value)
def characters(self, ch, start, length):
self.chars = self.chars + ch[start:start + length]
def fatalError(self, exc):
raise exc
# Shorthands (credits to and most copied from pickle.py)
def encode_call(file, method, object):
SOAPMarshaler(file).encode_call(method, object)
def encode_calls(method, object):
file = StringIO()
SOAPMarshaler(file).encode_call(method, object)
return file.getvalue()
def encode_response(file, method, object):
SOAPMarshaler(file).encode_response(method, object)
def encode_responses(method, object):
file = StringIO()
SOAPMarshaler(file).encode_response(method, object)
return file.getvalue()
def encode_fault(file, faultcode, message, runcode, detail = None):
SOAPMarshaler(file).encode_fault(faultcode, message, runcode, detail)
def encode_faults(faultcode, message, runcode, detail = None):
file = StringIO()
SOAPMarshaler(file).encode_fault(faultcode, message, runcode, detail)
return file.getvalue()
def dump(object, file):
SOAPMarshaler(file).dump(object)
def dumps(object):
file = StringIO()
SOAPMarshaler(file).dump(object)
return file.getvalue()
def decode(file):
return SOAPUnmarshaler(file).decode()
def decodes(str):
file = StringIO(str)
return SOAPUnmarshaler(file).decode()
def load(file):
return SOAPUnmarshaler(file).load()
def loads(str):
file = StringIO(str)
return SOAPUnmarshaler(file).load()
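# Example round-trip (a minimal sketch; Marshaler.dump and Unmarshaler.load
# are assumed to be provided by ScarabMarshal):
#   xml = encode_calls('getStockPrice', {'symbol': 'XYZ'})
#   # ...transport the envelope, then on the receiving side:
#   call = decodes(xml)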
if __name__ == '__main__':
runtests(load, loads, dump, dumps)
|
from discord.ext import commands
from discord.ext.commands import BadArgument, CommandError
from cogs.utils.DataBase.Items import Item
from cogs.utils.DataBase.guild import Filter
from cogs.utils.errors import BadListType, BadQueryType
def convert_quantity(text: str) -> float:
    allowed_symbols = "1234567890ek.+-*/"
    # Reject any character outside the whitelist.  The original superset test
    # (set(text) > set(allowed_symbols)) only fired when *every* allowed
    # symbol appeared in the input, so bad quantities slipped through.
    if not set(text) <= set(allowed_symbols):
        raise BadArgument("unsupported quantity")
    try:
        return eval(text)
    except SyntaxError:
        # 'k' is shorthand for thousands: "5k" -> "5*1e3" -> 5000.0
        try:
            return eval(text.replace('k', "*1e3"))
        except SyntaxError:
            raise BadArgument("unsupported quantity")
def to_lower(text):
if text.lower() in ('sell', 'buy', 'all'):
return text.lower()
else:
raise BadListType
def calc_quantity(args):
    relation = ''
    value = None
    for i, arg in enumerate(args):
        if arg in ('>', '=', '<'):
            relation += arg
        else:
            # Everything from the first non-operator character onward is the
            # numeric part (the original sliced the single character `arg`
            # instead of the full string `args`).
            value = convert_quantity(args[i:])
            break
    if relation in ('>', '<', '=', '<=', '>=') and value is not None:
        return relation, value
    else:
        raise BadQueryType
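# Example: calc_quantity(">=1e6") -> ('>=', 1000000.0)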
def listing_args(items, query):
if query is None:
return Filter(items)
kwargs = {}
queries = query.split(' ') # sample: price>=1e6 quantity<20
for q in queries:
if q.startswith("price") or q.startswith("p"):
if len(q.split('price')) == 2:
args = q.split('price')[1]
else:
args = q.split('p')[1]
kwargs['price'], kwargs['price_type'] = calc_quantity(args)
elif q.startswith("quantity") or q.startswith("q"):
if len(q.split('price')) == 2:
args = q.split('price')[1]
else:
args = q.split('q')[1]
kwargs['quantity'], kwargs['quantity_type'] = calc_quantity(args)
# items = None
# price = '0'
# price_type = '>'
# quantity = '1'
# quantity_type = '>='
# order_by = 'time'
# order_type = "DESC"
# trade = False
return Filter(items, **kwargs)
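# Example: listing_args(items, "p>=1e6 q<20") builds
#   Filter(items, price='>=', price_type=1000000.0, quantity='<', quantity_type=20)
# calc_quantity returns (relation, value), so the relation string lands in the
# 'price'/'quantity' key and the number in the matching *_type key.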
class Incorrect_Args(CommandError):
def __init__(self):
pass
class StockConverter(commands.Converter):
def __getitem__(self, item):
pass
async def convert(self, ctx, args) -> list:
if args.find('=') == -1:
item = await Item.convert(ctx, args)
return [item, 1]
else:
item, quantity = args.split('=')
quantity = convert_quantity(quantity)
item = await Item.convert(ctx, item)
return [item, quantity]
class CompleteConvertor(commands.Converter):
def __getitem__(self, item):
pass
async def convert(self, ctx, argument):
args = argument.split(', ')
if len(args) == 1:
typer = 'money'
trader = await StockConverter().convert(ctx, args[0])
return typer, trader
else:
trader1 = await TradeConvertor().convert(ctx, args[0])
trader2 = await TradeConvertor().convert(ctx, args[1])
return 'item', trader1, trader2
class TradeConvertor(commands.Converter):
async def convert(self, ctx, args):
args = args.split(' ')
return_items = []
for arg in args:
if arg.find('=') != -1:
item, quantity = arg.split('=')
quantity = convert_quantity(quantity)
item = await Item.convert(ctx, item)
return_items.append((item, quantity))
else:
item = await Item.convert(ctx, args.strip())
return_items.append((item, 1))
return return_items
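# Example argument formats accepted by the converters above (item names are
# hypothetical; Item.convert resolves them):
#   "apple"           -> [Item<apple>, 1]              (StockConverter)
#   "apple=5k"        -> [Item<apple>, 5000.0]
#   "apple=2, pear=3" -> ('item', trader1, trader2)    (CompleteConvertor)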
|
from .search.search import Search
|
import praw
import random
import os
from flask import Flask, render_template, send_from_directory
from werkzeug.middleware.proxy_fix import ProxyFix
app = Flask(__name__)
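# ProxyFix makes Flask honour X-Forwarded-* headers set by a reverse proxy,
# so generated URLs and client addresses reflect the original request.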
app.wsgi_app = ProxyFix(app.wsgi_app)
reddit = praw.Reddit(client_id='pua14mlzkyv5_ZfQ2WmqpQ',
client_secret='T5loHbaVD7m-RNMpFf0z24iOJsInwg',
user_agent='Oxygen',
check_for_async= False)
@app.route("/favicon.png")
def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.png', mimetype='image/png')
@app.route("/", methods=['GET'])
def home():
subreddits = ['hentai', 'rule34', 'nsfw']
choices = random.choice(subreddits)
nsfw_submission = reddit.subreddit(choices).new(limit=15)
post_to_pick = random.randint(1,15)
for i in range(0, post_to_pick):
submission = next(x for x in nsfw_submission if not x.stickied)
title = submission.title
link = submission.permalink
url = submission.url
upvotes = submission.score
upvote_ratio = submission.upvote_ratio
author = submission.author
print(link)
return render_template("home.html", title=title, url=url, upvotes=upvotes, author=author, upvote_ratio=upvote_ratio, link=link)
@app.route('/hentai')
def hentai():
nsfw_submission = reddit.subreddit("hentai").new(limit=15)
post_to_pick = random.randint(1,15)
for i in range(0, post_to_pick):
submission = next(x for x in nsfw_submission if not x.stickied)
title = submission.title
link = submission.permalink
url = submission.url
upvotes = submission.score
upvote_ratio = submission.upvote_ratio
author = submission.author
return f"<img src='{url}'>"
@app.route("/endpoint")
def endpoints():
pass
@app.route("/api/v1/femboy")
def femboy():
nsfw_submission = reddit.subreddit('FemboysAndHentai').hot()
post_to_pick = random.randint(1,30)
for i in range(0, post_to_pick):
submission = next(x for x in nsfw_submission if not x.stickied)
title = submission.title
return {'status': 200, 'title':title,'link': submission.url}
@app.route("/api/v1/nsfw")
def nsfw():
nsfw_submission = reddit.subreddit('nsfw').hot()
post_to_pick = random.randint(1,30)
for i in range(0, post_to_pick):
submission = next(x for x in nsfw_submission if not x.stickied)
title = submission.title
return {'status':200, 'title':title, 'link':submission.url}
if __name__ == "__main__":
app.run(debug=True, port=2323)
|
# Copyright 2021 InterDigital R&D and Télécom Paris.
# Author: Giorgia Cantisani
# License: Apache 2.0
"""Utils for fine tuning
"""
import torch
BN_TYPES = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)
def _make_trainable(module):
"""Unfreezes a given module.
Args:
module: The module to unfreeze
"""
for param in module.parameters():
param.requires_grad = True
module.train()
def _recursive_freeze(module, train_bn=True):
"""Freezes the layers of a given module.
Args:
module: The module to freeze
train_bn: If True, leave the BatchNorm layers in training mode
"""
children = list(module.children())
if not children:
if not (isinstance(module, BN_TYPES) and train_bn):
for param in module.parameters():
param.requires_grad = False
module.eval()
else:
# Make the BN layers trainable
_make_trainable(module)
else:
for child in children:
_recursive_freeze(module=child, train_bn=train_bn)
def freeze(module, n=None, train_bn=True):
"""Freezes the layers up to index n (if n is not None).
Args:
module: The module to freeze (at least partially)
n: Max depth at which we stop freezing the layers. If None, all
the layers of the given module will be frozen.
train_bn: If True, leave the BatchNorm layers in training mode
"""
modules = list(module.modules())
n_max = len(modules) if n is None else int(n)
for module in modules[:n_max]:
_recursive_freeze(module=module, train_bn=train_bn)
for module in modules[n_max:]:
_make_trainable(module=module)
def filter_params(module, train_bn=True):
"""Yields the trainable parameters of a given module.
Args:
module: A given module
train_bn: If True, leave the BatchNorm layers in training mode
Returns:
Generator
"""
children = list(module.children())
if not children:
if not (isinstance(module, BN_TYPES) and train_bn):
for param in module.parameters():
if param.requires_grad:
yield param
else:
for child in children:
for param in filter_params(module=child, train_bn=train_bn):
yield param
def _unfreeze_and_add_param_group(module, optimizer, lr=None, train_bn=True):
"""Unfreezes a module and adds its parameters to an optimizer."""
_make_trainable(module)
params_lr = optimizer.param_groups[0]['lr'] if lr is None else float(lr)
optimizer.add_param_group(
{'params': filter_params(module=module, train_bn=train_bn),
'lr': params_lr / 10.,
})
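# Example (a sketch; `model` and `optimizer` are whatever you trained with):
# progressively unfreeze the decoder, giving it a 10x smaller learning rate:
#   _unfreeze_and_add_param_group(model.decoder, optimizer, train_bn=True)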
# Test
if __name__ == '__main__':
# Load pretrained model
klass, args, kwargs, state = torch.load('./demucs/tasnet.th', 'cpu')
model = klass(*args, **kwargs)
model.load_state_dict(state)
# Select individual modules
encoder = model.encoder
separator = model.separator
decoder = model.decoder
# Freeze some layers
freeze(encoder)
freeze(separator, n=338)
# Count trainable parameters
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
nr_parameters = count_parameters(model)
print(nr_parameters)
# Initialize an optimizer
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001) |
from datetime import datetime
import pytest
from brewlog.models import BrewerProfile
@pytest.mark.usefixtures('app')
class TestBrewerProfileObject:
def test_create_no_names(self, user_factory):
user = user_factory(first_name=None, last_name=None, nick=None)
assert 'anonymous' in user.name.lower()
def test_last_created_public_only(self, user_factory):
user_factory(is_public=True)
user_factory(is_public=False)
assert len(BrewerProfile.last_created(public_only=True)) == 1
def test_last_created_all(self, user_factory):
user_factory(is_public=True)
user_factory(is_public=False)
assert len(BrewerProfile.last_created(public_only=False)) == 2
def test_public_ordering(self, user_factory):
user_p_b = user_factory(nick='b', is_public=True)
user_p_a = user_factory(nick='a', is_public=True)
assert BrewerProfile.public(order_by=BrewerProfile.nick).first() == user_p_a
assert BrewerProfile.public().first() == user_p_b
def test_emailconfirmation_set(self, mocker, user_factory):
user = user_factory()
dt = datetime(2019, 6, 14, 22, 11, 30)
mocker.patch(
'brewlog.models.users.datetime',
mocker.Mock(utcnow=mocker.Mock(return_value=dt)),
)
user.set_email_confirmed(True)
assert user.email_confirmed is True
assert user.confirmed_dt == dt
def test_emailconfirmation_clear(self, mocker, user_factory):
dt = datetime(2019, 6, 14, 22, 11, 30)
user = user_factory(email_confirmed=True, confirmed_dt=dt)
user.set_email_confirmed(False)
assert user.email_confirmed is False
assert user.confirmed_dt is None
|
class List(object):
def method(self):
pass
|
""" SamFirm Bot check updates module"""
from asyncio import create_subprocess_shell
from asyncio.subprocess import PIPE
from telethon import events
from samfirm_bot import TG_LOGGER, TG_BOT_ADMINS
from samfirm_bot.samfirm_bot import BOT, SAM_FIRM
from samfirm_bot.utils.checker import is_device, is_region
# Lazy model group: with a greedy (.*) the optional version argument made the
# model group swallow the region token.
@BOT.on(events.NewMessage(pattern=r'/samcheck(?:@\S+)?(?: )(.*?)(?: )([a-zA-Z0-9]{3})(?: )?(.*)?'))
async def check(event):
"""Send a message when the command /samcheck is sent."""
model = event.pattern_match.group(1).upper()
region = event.pattern_match.group(2).upper()
    # group(3) is optional: an unmatched group returns None (it does not raise
    # IndexError), so guard before calling .upper().
    version = event.pattern_match.group(3)
    version = version.upper() if version else None
if event.message.sender_id not in TG_BOT_ADMINS:
if not await is_device(model) or not await is_region(region):
await event.reply("**Either model or region is incorrect!**")
return
command = SAM_FIRM.check_update(model, region, version)
bot_reply = await event.reply("__Checking...__")
process = await create_subprocess_shell(command, stdin=PIPE, stdout=PIPE)
output = await process.stdout.read()
output = output.decode().strip()
await process.wait()
if output and "Could not" in output:
await bot_reply.edit("**Not Found!**")
return
if output and "Version" in output:
update = SAM_FIRM.parse_output(output)
TG_LOGGER.info(update)
message = f"**Device**: {SAM_FIRM.get_device_name(update['model'])}\n" \
f"**Model:** {update['model']}\n" \
f"**System Version:** {update['system']}\n" \
f"**Android Version:** {update['android']}\n" \
f"**CSC Version:** {update['csc']}\n" \
f"**Bootloader Version:** {update['bootloader']}\n" \
f"**Release Date:** {update['date']}\n" \
f"**Size:** {update['size']}"
await bot_reply.edit(message)
return
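# Example invocations (hypothetical model/CSC region):
#   /samcheck SM-G975F XSG                  -> latest firmware for that CSC
#   /samcheck SM-G975F XSG G975FXXU3ASG8    -> check a specific version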
|
# -*- coding: utf-8 -*-
# Copyright (2017-2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Python libs
import logging
# 3rd party libs
from flask import abort
from flask import Blueprint
from flask import request
from flask import Response
from flask_api import status
from hpOneView.exceptions import HPOneViewException
# own libs
from oneview_redfish_toolkit.api.session import Session
from oneview_redfish_toolkit.api.session_collection import SessionCollection
from oneview_redfish_toolkit.blueprints.util.response_builder import \
ResponseBuilder
from oneview_redfish_toolkit import client_session
session = Blueprint('session', __name__)
@session.route(SessionCollection.BASE_URI, methods=["GET"])
def get_collection():
result = SessionCollection(client_session.get_session_ids())
return ResponseBuilder.success(result)
@session.route(SessionCollection.BASE_URI + "/" + "<session_id>",
methods=["GET"])
def get_session(session_id):
if session_id not in client_session.get_session_ids():
abort(status.HTTP_404_NOT_FOUND)
return ResponseBuilder.success(Session(session_id))
@session.route(SessionCollection.BASE_URI + "/" + "<session_id>",
methods=["DELETE"])
def delete_session(session_id):
token = request.headers.get('x-auth-token')
session_for_delete = client_session.get_session_id_by_token(token)
if session_id != session_for_delete:
abort(status.HTTP_404_NOT_FOUND)
client_session.clear_session_by_token(token)
return Response(status=status.HTTP_204_NO_CONTENT,
mimetype="application/json")
@session.route(SessionCollection.BASE_URI, methods=["POST"])
def post_session():
"""Posts Session
The response to the POST request to create a session includes:
- An X-Auth-Token header that contains a session auth token that
          the client can use on subsequent requests.
- A Location header that contains a link to the newly created
session resource.
- The JSON response body that contains a full representation of
the newly created session object.
Exception:
HPOneViewException: Invalid username or password.
return abort(401)
"""
try:
try:
body = request.get_json()
username = body["UserName"]
password = body["Password"]
except Exception:
error_message = "Invalid JSON key. The JSON request body " \
"must have the keys UserName and Password"
abort(status.HTTP_400_BAD_REQUEST, error_message)
token, session_id = client_session.login(username, password)
sess = Session(session_id)
return ResponseBuilder.success(sess, {
"Location": sess.redfish["@odata.id"],
"X-Auth-Token": token
})
except HPOneViewException as e:
logging.exception('Unauthorized error: {}'.format(e))
abort(status.HTTP_401_UNAUTHORIZED)
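# Example client call (hypothetical host and credentials):
#   import requests
#   resp = requests.post("https://localhost:5000" + SessionCollection.BASE_URI,
#                        json={"UserName": "administrator", "Password": "secret"})
#   token = resp.headers["X-Auth-Token"]   # send as x-auth-token on later calls
#   session_uri = resp.headers["Location"]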
|
"""
Serializers for low-level elements of the canonical record.
Specifically, this maps concepts in :mod:`.domain` to low-level elements in
:mod:`arxiv.canonical.record` and vice versa.
"""
from io import BytesIO
from json import dumps, load
from typing import Callable, IO, Tuple
from ..domain import Version, ContentType, Listing, CanonicalFile, \
VersionedIdentifier, URI
from ..record import RecordStream, RecordVersion, RecordMetadata, \
RecordEntryMembers, RecordListing
from .decoder import CanonicalDecoder
from .encoder import CanonicalEncoder
Key = str
ContentLoader = Callable[[Key], IO[bytes]]
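# Example loader (a minimal sketch): resolve a key against the local
# filesystem; a real deployment might read from object storage instead.
#   def fs_loader(key: Key) -> IO[bytes]:
#       return open(key, 'rb')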
|
import argparse
import logging
from scraper import Scraper
def parse_args():
"""Parse command line arguments"""
args_parser = argparse.ArgumentParser()
args = args_parser.parse_args()
return args
def main():
args = parse_args()
scraper = Scraper(cache_name=__name__, cache_expiry=36000)
# Perform your scraping here
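    # e.g. (hypothetical Scraper API): data = scraper.get("https://example.com")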
if __name__ == "__main__":
logging.basicConfig(filename="{}.log".format(__name__), level=logging.INFO)
try:
main()
except Exception as e:
logging.exception(e)
|