import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class MingwConan(ConanFile):
name = "mingw-builds"
description = "MinGW is a contraction of Minimalist GNU for Windows"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/ericLemanissier/mingw-builds"
license = "ZPL-2.1", "MIT", "GPL-2.0-or-later"
topics = ("gcc", "gnu", "unix", "mingw32", "binutils")
settings = "os", "arch"
options = {"threads": ["posix", "win32"], "exception": ["seh", "sjlj"]}
default_options = {"threads": "posix", "exception": "seh"}
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def validate(self):
valid_os = ["Windows"]
if str(self.settings.os) not in valid_os:
raise ConanInvalidConfiguration("MinGW {} is only supported for the following operating systems: {}"
.format(self.version, valid_os))
valid_arch = ["x86_64"]
if str(self.settings.arch) not in valid_arch:
raise ConanInvalidConfiguration("MinGW {} is only supported for the following architectures on {}: {}"
.format(self.version, str(self.settings.os), valid_arch))
if getattr(self, "settings_target", None):
if str(self.settings_target.os) not in valid_os:
raise ConanInvalidConfiguration("MinGW {} is only supported for the following operating systems: {}"
.format(self.version, valid_os))
valid_arch = ["x86_64"]
if str(self.settings_target.arch) not in valid_arch:
raise ConanInvalidConfiguration("MinGW {} is only supported for the following architectures on {}: {}"
.format(self.version, str(self.settings.os), valid_arch))
if self.settings_target.compiler != "gcc":
raise ConanInvalidConfiguration("Only GCC is allowed as compiler.")
if str(self.settings_target.compiler.threads) != str(self.options.threads):
raise ConanInvalidConfiguration("Build requires 'mingw' provides binaries for gcc "
"with threads={}, your profile:host declares "
"threads={}, please use the same value for both."
.format(self.options.threads,
self.settings_target.compiler.threads))
if str(self.settings_target.compiler.exception) != str(self.options.exception):
raise ConanInvalidConfiguration("Build requires 'mingw' provides binaries for gcc "
"with exception={}, your profile:host declares "
"exception={}, please use the same value for both."
.format(self.options.exception,
self.settings_target.compiler.exception))
def build_requirements(self):
self.build_requires("7zip/19.00")
def build(self):
# Source should be downloaded in the build step since it depends on specific options
url = self.conan_data["sources"][self.version][str(self.options.threads)][str(self.options.exception)]
self.output.info("Downloading: %s" % url["url"])
tools.download(url["url"], "file.7z", sha256=url["sha256"])
self.run("7z x file.7z")
os.remove('file.7z')
def package(self):
target = "mingw64" if self.settings.arch == "x86_64" else "mingw32"
self.copy("*", dst="", src=target)
tools.rmdir(target)
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "opt", "lib", "cmake"))
def package_info(self):
if getattr(self, "settings_target", None):
if self.settings_target.compiler != "gcc":
raise ConanInvalidConfiguration("Only GCC is allowed as compiler.")
if str(self.settings_target.compiler.threads) != str(self.options.threads):
raise ConanInvalidConfiguration("Build requires 'mingw' provides binaries for gcc "
"with threads={}, your profile:host declares "
"threads={}, please use the same value for both."
.format(self.options.threads,
self.settings_target.compiler.threads))
if str(self.settings_target.compiler.exception) != str(self.options.exception):
raise ConanInvalidConfiguration("Build requires 'mingw' provides binaries for gcc "
"with exception={}, your profile:host declares "
"exception={}, please use the same value for both."
.format(self.options.exception,
self.settings_target.compiler.exception))
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH env var with : {}".format(bin_path))
self.env_info.PATH.append(bin_path)
self.env_info.MINGW_HOME = str(self.package_folder)
self.env_info.CONAN_CMAKE_GENERATOR = "MinGW Makefiles"
self.env_info.CXX = os.path.join(self.package_folder, "bin", "g++.exe").replace("\\", "/")
self.env_info.CC = os.path.join(self.package_folder, "bin", "gcc.exe").replace("\\", "/")
self.env_info.LD = os.path.join(self.package_folder, "bin", "ld.exe").replace("\\", "/")
self.env_info.NM = os.path.join(self.package_folder, "bin", "nm.exe").replace("\\", "/")
self.env_info.AR = os.path.join(self.package_folder, "bin", "ar.exe").replace("\\", "/")
self.env_info.AS = os.path.join(self.package_folder, "bin", "as.exe").replace("\\", "/")
self.env_info.STRIP = os.path.join(self.package_folder, "bin", "strip.exe").replace("\\", "/")
self.env_info.RANLIB = os.path.join(self.package_folder, "bin", "ranlib.exe").replace("\\", "/")
self.env_info.STRINGS = os.path.join(self.package_folder, "bin", "strings.exe").replace("\\", "/")
self.env_info.OBJDUMP = os.path.join(self.package_folder, "bin", "objdump.exe").replace("\\", "/")
self.env_info.GCOV = os.path.join(self.package_folder, "bin", "gcov.exe").replace("\\", "/")
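
# --- Hedged usage sketch (not part of the recipe above) ---
# A minimal consumer recipe, assuming a Windows host profile with
# compiler=gcc and threads/exception values that match this package's
# options; the version string below is purely illustrative. Declaring the
# tool as a build requirement makes the PATH/CC/CXX variables exported in
# package_info() available while the consumer builds.
#
#   from conans import ConanFile
#
#   class ConsumerConan(ConanFile):
#       settings = "os", "arch", "compiler", "build_type"
#       build_requires = "mingw-builds/<version>"
#
#       def build(self):
#           self.run("gcc --version")  # resolves to the packaged MinGW gcc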
|
from collections import deque
def search(lines, pattern, history=5):
ret = deque(maxlen=history)
for line in lines:
if pattern in line:
ret.append(line)
return ret
if __name__ == '__main__':
with open('../input/d1.txt') as f:
result = search(f, 'python', 5)
print(result) |
import sys
sys.path.append('./linkedlist')
from linkedlist import DoublyLinkedList, DoublyNode
def has_loop(ll):
    """Detect a cycle with two runners: runner2 advances two nodes per
    iteration, runner1 one; if they meet again the list contains a loop."""
    double_ref = None
runner1 = ll.head
runner2 = ll.head
first_it = True
while runner1 and runner2:
if runner1 == runner2 and not first_it:
double_ref = runner1
break
first_it = False
runner1 = runner1.next
runner2 = runner2.next and runner2.next.next
return double_ref if double_ref is not None else False
if __name__ == '__main__': # tests
n1 = DoublyNode(value=1)
n2 = DoublyNode(value=2)
n3 = DoublyNode(value=3)
n4 = DoublyNode(value=4)
n5 = DoublyNode(value=5)
n6 = DoublyNode(value=6)
n1.next = n2
n2.next = n3
n3.next = n6
n6.next = n4
n4.next = n5
n5.next = n6
ll = DoublyLinkedList(head=n1)
    assert has_loop(ll) is not False
assert has_loop(ll) == n6
|
from .._ffi.function import _init_api
_init_api("relay._make", __name__)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# noinspection PyBroadException
try:
import eventlet
eventlet.monkey_patch(all=True, thread=False)
except:
pass
import argparse
import itertools
import logging
import logging.config
import os
import signal
import eventlet.wsgi
import sqlalchemy_utils
import yaml
from flask import Flask, request, g as flask_g
from flask_babel import get_locale, Babel
from flask_babel import gettext
from flask_cors import CORS
from flask_migrate import Migrate
from flask_restful import Api, abort
from flask_swagger_ui import get_swaggerui_blueprint
from limonero import CustomJSONEncoder as LimoneroJSONEncoder
from limonero.cache import cache
from limonero.data_source_api import DataSourceDetailApi, DataSourceListApi, \
DataSourcePermissionApi, DataSourceUploadApi, DataSourceInferSchemaApi, \
DataSourcePrivacyApi, DataSourceDownload, DataSourceSampleApi, \
DataSourceInitializationApi
from limonero.model_api import ModelDetailApi, ModelListApi, ModelDownloadApi
from limonero.models import db, DataSource, Storage
from limonero.privacy_api import GlobalPrivacyListApi, \
AttributePrivacyGroupListApi
from limonero.py4j_init import init_jvm
from limonero.storage_api import StorageDetailApi, StorageListApi, \
StorageMetadataApi
from cryptography.fernet import Fernet
os.chdir(os.environ.get('LIMONERO_HOME', '.'))
# noinspection PyUnusedLocal
def exit_gracefully(s, frame):
os.kill(os.getpid(), signal.SIGTERM)
def create_app(main_module: bool = False):
app = Flask(__name__, static_url_path='/static', static_folder='static')
app.config['BABEL_TRANSLATION_DIRECTORIES'] = os.path.abspath(
'limonero/i18n/locales')
app.json_encoder = LimoneroJSONEncoder
babel = Babel(app)
logging.config.fileConfig('logging_config.ini')
app.secret_key = 'l3m0n4d1'
# Cryptography key
app.download_key = Fernet.generate_key()
app.fernet = Fernet(app.download_key)
# Cache
cache.init_app(app)
# CORS
CORS(app, resources={r"/*": {"origins": "*"}})
api = Api(app)
# Swagger
swaggerui_blueprint = get_swaggerui_blueprint(
'/api/docs',
'/static/swagger.yaml',
config={ # Swagger UI config overrides
'app_name': "Lemonade Caipirinha"
},
# oauth_config={ # OAuth config. See https://github.com/swagger-api/swagger-ui#oauth2-configuration .
# 'clientId': "your-client-id",
# 'clientSecret': "your-client-secret-if-required",
# 'realm': "your-realms",
# 'appName': "your-app-name",
# 'scopeSeparator': " ",
# 'additionalQueryStringParams': {'test': "hello"}
# }
)
app.register_blueprint(swaggerui_blueprint)
mappings = {
'/datasources': DataSourceListApi,
'/datasources/upload': DataSourceUploadApi,
'/datasources/infer-schema/<int:data_source_id>': DataSourceInferSchemaApi,
'/datasources/sample/<int:data_source_id>': DataSourceSampleApi,
'/datasources/initialize/<status>/<int:data_source_id>':
DataSourceInitializationApi,
'/datasources/<int:data_source_id>': DataSourceDetailApi,
'/datasources/<int:data_source_id>/permission/<int:user_id>':
DataSourcePermissionApi,
'/datasources/<int:data_source_id>/privacy': DataSourcePrivacyApi,
'/privacy': GlobalPrivacyListApi,
'/privacy/attribute-groups': AttributePrivacyGroupListApi,
'/models': ModelListApi,
'/models/<int:model_id>': ModelDetailApi,
'/storages': StorageListApi,
'/storages/<int:storage_id>': StorageDetailApi,
'/storages/metadata/<int:storage_id>': StorageMetadataApi,
}
grouped_mappings = itertools.groupby(sorted(mappings.items()),
key=lambda path: path[1])
for view, g in grouped_mappings:
api.add_resource(view, *[x[0] for x in g], endpoint=view.__name__)
app.add_url_rule('/datasources/public/<int:data_source_id>/download',
methods=['GET'], endpoint='DataSourceDownload',
view_func=DataSourceDownload.as_view('download'))
app.add_url_rule('/models/<int:model_id>/download',
methods=['GET'], endpoint='ModelDownloadApi',
view_func=ModelDownloadApi.as_view('download_model'))
migrate = Migrate(app, db)
app.handle_exception
@babel.localeselector
def get_locale():
user = getattr(flask_g, 'user', None)
if user is not None and user.locale:
return user.locale
else:
return request.args.get(
'lang', request.accept_languages.best_match(['en', 'pt', 'es']))
sqlalchemy_utils.i18n.get_locale = get_locale
config_file = None
signal.signal(signal.SIGINT, exit_gracefully)
if main_module:
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str,
help="Config file", required=False)
args = parser.parse_args()
config_file = args.config
if config_file is None:
config_file = os.environ.get('LIMONERO_CONFIG')
logger = logging.getLogger(__name__)
if config_file:
with open(config_file) as f:
config = yaml.load(f, Loader=yaml.FullLoader)['limonero']
app.config['LIMONERO_CONFIG'] = config
app.config["RESTFUL_JSON"] = {"cls": app.json_encoder}
server_config = config.get('servers', {})
app.config['SQLALCHEMY_DATABASE_URI'] = server_config.get(
'database_url')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
is_mysql = 'mysql://' in app.config['SQLALCHEMY_DATABASE_URI']
if config.get('config') is not None and 'config' in config and is_mysql:
app.config.update(config.get('config', {}))
app.config['SQLALCHEMY_POOL_SIZE'] = 10
app.config['SQLALCHEMY_POOL_RECYCLE'] = 240
db.init_app(app)
port = int(config.get('port', 5000))
logger.debug(
gettext('Running in %(mode)s mode', mode=config.get('environment')))
init_jvm(app, logger)
if main_module:
# JVM, used to interact with HDFS.
if config.get('environment', 'dev') == 'dev':
app.run(debug=True, port=port, host='0.0.0.0')
else:
eventlet.wsgi.server(eventlet.listen(('', port)), app)
else:
return app
else:
logger.error(
gettext('Please, set LIMONERO_CONFIG environment variable'))
exit(1)
return app
if __name__ == '__main__':
create_app(__name__ == '__main__')
|
import random
import time
import numpy as np
from apps.ImageSearch.algs.base import BaseAlgorithm, QUEUE_SIZE
from apps.ImageSearch.algs.utils import is_locked, can_fit, sparse2list
from apps.ImageSearch.algs.models import roc_auc_est_score
from next.utils import debug_print
from sklearn.linear_model import LogisticRegressionCV
class Linear(BaseAlgorithm):
def linear_model(self, cv=3):
return LogisticRegressionCV(cv=cv, scoring=roc_auc_est_score)
def fill_queue(self, butler, args):
if is_locked(butler.algorithms.memory.lock('fill_queue')):
debug_print('fill_queue is running already')
return
try:
queue = butler.algorithms.get(key='queue')
except AttributeError:
debug_print('couldn\'t fill queue, experiment doesn\'t exist yet?')
return
if len(queue) > len(args['queue']):
debug_print('fill_queue called already')
return
with butler.algorithms.memory.lock('fill_queue'):
debug_print('filling queue')
X = self.select_features(butler, {})
t0 = time.time()
d = X.shape[1]
labels = dict(butler.algorithms.get(key='labels'))
n = butler.algorithms.get(key='n')
y = []
unlabeled = []
positives = []
labeled = []
for i in xrange(n):
if i not in labels:
unlabeled.append(i)
else:
labeled.append(i)
y.append(labels[i])
if labels[i] == 1:
positives.append(i)
if can_fit(y, 2):
cv = min(3, sum(y))
model = self.linear_model(cv=cv)
model = model.fit(X[labeled], y)
# mask helps if features are sparse
mask = np.ravel(model.coef_.astype(bool))
if butler.alg_id == 'LassoLinear':
butler.algorithms.set(key='n_coefs', value=sum(mask))
sparse_coefs = sparse2list(model.coef_)
butler.algorithms.set(key='coefs', value=sparse_coefs)
if sum(mask):
X = X[:, mask]
coefs = np.ravel(model.coef_)[mask]
else:
coefs = np.ravel(model.coef_)
dists = np.dot(X[unlabeled], coefs)
dists = np.argsort(-dists)
else:
target = random.choice(positives)
x = X[target]
a, b = np.polyfit([4096*2, 1], [10000, 424924], 1)
n_sample = int(a*d + b)
if len(unlabeled) > n_sample:
debug_print('sampling {} unlabeled examples'.format(n_sample))
unlabeled = random.sample(unlabeled, n_sample)
X = X[unlabeled]
debug_print('computing distances')
t0 = time.time()
dists = np.linalg.norm(X - x, axis=1)
debug_print('took {}s'.format(time.time() - t0))
dists = np.argsort(dists)
queries = butler.algorithms.get(key='queries') - butler.algorithms.get(key='last_filled')
queue_size = max(QUEUE_SIZE, queries * 2)
self.set_queue(butler, [unlabeled[i] for i in dists[:queue_size]])
butler.algorithms.set(key='last_filled', value=butler.algorithms.get(key='queries'))
butler.algorithms.set(key='fill_queue_time', value=time.time() - t0)
def constraint(self, butler):
"""
This should return a function that is of the form:
f(n_coefficients: int) -> bool
That is, takes the number of features used by the model and returns whether that should be included as a possible model.
"""
raise NotImplementedError
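
# Hedged sketch of the contract described in constraint()'s docstring: a
# hypothetical subclass (the MaxCoefLinear name and the 'max_coefs' key are
# illustrative assumptions, not part of this app).
class MaxCoefLinear(Linear):
    def constraint(self, butler):
        max_coefs = butler.algorithms.get(key='max_coefs')
        # Predicate over the number of features a candidate model uses.
        return lambda n_coefficients: n_coefficients <= max_coefs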
|
import unittest
import json
from app import create_app
class TestOrders(unittest.TestCase):
'''set up for testing'''
def setUp(self):
self.app = create_app("testing")
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
def test_create_order(self):
data = {
"name": "eggcurry",
"price": 20,
"description": "sweet eggs"
}
res = self.client.post(
"/api/v1/orders",
data=json.dumps(data),
headers={"content-type": "application/json"}
)
self.assertEqual(res.status_code, 201)
self.assertEqual(json.loads(res.data)['message'], "Food order created")
def test_get_all_orders(self):
res = self.client.get(
"/api/v1/orders",
headers={"content-type": "application/json"}
)
self.assertEqual(res.status_code, 200)
def test_order_by_id(self):
res = self.client.get(
"/api/v1/orders/1",
headers={"content-type": "application/json"}
)
self.assertEqual(res.status_code, 200)
def test_update_order_status(self):
res = self.client.put(
"api/v1/orders/1",
headers={"content-type": "application/json"}
)
print(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(json.loads(res.data)[
'message'], "status approved")
def test_non_order_by_id(self):
res = self.client.get(
"/api/v1/orders/111",
headers={"content-type": "application/json"}
)
self.assertEqual(res.status_code, 404)
self.assertEqual(json.loads(res.data)[
'message'], "Order not found")
def test_non_order_delete(self):
res = self.client.delete(
"api/v1/orders/11",
headers={"content-type": "application/json"}
)
self.assertEqual(res.status_code, 404)
self.assertEqual(json.loads(res.data)[
'message'], "Order not found") |
import FWCore.ParameterSet.Config as cms
MEtoEDMConvertSiStrip = cms.EDProducer("MEtoEDMConverter",
Name = cms.untracked.string('MEtoEDMConverter'),
Verbosity = cms.untracked.int32(0), # 0 provides no output
# 1 provides basic output
# 2 provide more detailed output
Frequency = cms.untracked.int32(50),
MEPathToSave = cms.untracked.string('AlCaReco/SiStrip'),
deleteAfterCopy = cms.untracked.bool(False)
)
seqALCARECOSiStripPCLHistos = cms.Sequence(MEtoEDMConvertSiStrip)
|
import os
import copy
from tasks.task import input_to_src
from tasks.task import decode_properties
from tasks.task import output_to_sink
from tasks.task import find_model
from tasks.task import queue_properties
from tasks.task import inference_properties
from tasks.task import vpp_properties
class Channel():
def __init__(self, runner_config, model_root, system_info, caps, channel_number):
self._runner_config = runner_config
self.channel_number = channel_number
self.system_info = system_info
self.model_root = model_root
self._elements = []
def add_detect_element(self, detect_model_name):
self._detect_model_name = detect_model_name
self._runner_config.setdefault("detect",{})
self._runner_config["detect"].setdefault("element","gvadetect")
self._runner_config["detect"].setdefault("name", "detect" + str(self.channel_number))
self._runner_config["detect"].setdefault("enabled",True)
self._model = find_model(detect_model_name, self.model_root)
queue_name = "detect-queue"
queue_config = self._runner_config.setdefault(queue_name,{})
queue_config.setdefault("element", "queue")
queue_config.setdefault("name", queue_name+str(self.channel_number))
queue_config.setdefault("enabled", False)
self._detect_queue_properties = queue_properties(queue_config,
self._model,
self.system_info)
self._detect_properties = inference_properties(self._runner_config["detect"],
self._model,
detect_model_name,
self.system_info)
self._elements.append(self._detect_queue_properties)
self._elements.append(self._detect_properties)
def add_classify_element(self, classify_model_config):
self.classify_model_config = classify_model_config
self._classify_properties = self._set_classify_properties(self.model_root, self.system_info, detect_model_name=self._detect_model_name,
channel_number=self.channel_number)
self._elements.append(self._classify_properties)
def add_src_element(self, input):
self.input = input
self._src_element = input_to_src(input)
def add_caps_element(self, caps):
self._caps = caps
def add_sink_element(self, output):
self._sink_element = output_to_sink(output,
self._runner_config,
self.channel_number)
def add_decode_element(self):
self._runner_config.setdefault("decode", {"device":"CPU"})
queue_name = "decode-queue"
queue_config = self._runner_config.setdefault(queue_name,{})
self._decode_properties = decode_properties(self._runner_config["decode"],
queue_config,
self.input,
self.system_info, self.channel_number)
self._elements.append(self._decode_properties)
if ("msdk" in self._decode_properties):
self._caps = self.input["extended-caps"]
def add_meta_element(self):
self._meta_element = "gvametaconvert add-empty-results=true ! gvametapublish method=file file-format=json-lines file-path=/tmp/result.jsonl"
def add_fpscounter_element(self):
self._fpscounter = "gvafpscounter"
def add_fakesink_element(self):
self._fakesink = "fakesink async=false"
def add_multifilesink_element(self):
self._multifilesink = "multifilesink location=\"%d.bin\""
def add_vpp_element(self, color_space, region, resolution):
self._runner_config.setdefault("vpp",{"device":"CPU"})
vpp_queue_name = "vpp-queue"
vpp_queue_config = self._runner_config.setdefault(vpp_queue_name,{})
self._vpp_properties = vpp_properties(self._runner_config["vpp"],
vpp_queue_config,
color_space,
region,
resolution,
self.system_info,
self.channel_number)
self._elements.append(self._vpp_properties)
def get_channel_standalone_pipeline(self):
standalone_elements = self._create_standalone_elements()
standalone_elements = " ! ".join([element for element in standalone_elements if element])
return standalone_elements
def get_channel_pipeline(self):
elements = self._create_elements()
elements = " ! ".join([element for element in elements if element])
return elements
def _create_elements(self):
return [self._src_element, self._caps] + self._elements + [self._sink_element]
def _create_standalone_elements(self):
demux = ( "qtdemux" if os.path.splitext(
self.input["source"])[1]==".mp4" else "")
res = ["urisourcebin uri=file://{}".format(self.input["source"]), demux, "parsebin"] + self._elements
if hasattr(self, '_meta_element'):
res = res + [self._meta_element]
if hasattr(self, '_fpscounter'):
res = res + [self._fpscounter]
if hasattr(self, '_fakesink'):
res = res + [self._fakesink]
if hasattr(self, '_multifilesink'):
res = res + [self._multifilesink]
return res
def _set_classify_properties(self, model_root, system_info, detect_model_name="", channel_number=0):
result = ""
elements = []
if (self.classify_model_config):
classify_model_list = self.classify_model_config
for index, model_name in enumerate(classify_model_list):
if (isinstance(model_name,list)):
raise Exception("Dependent Classification Not Supported")
model = find_model(model_name,
model_root)
element_name = "classify-{}".format(index)
classify_config = self._runner_config.setdefault(element_name,
{})
classify_config.setdefault("element","gvaclassify")
classify_config.setdefault("name",element_name+str(channel_number))
classify_config.setdefault("enabled", True)
if detect_model_name=="full_frame":
classify_config.setdefault("inference-region", "full-frame")
queue_name = "classify-{}-queue".format(index)
queue_config = self._runner_config.setdefault(queue_name,{})
queue_config.setdefault("element", "queue")
queue_config.setdefault("name", queue_name+str(channel_number))
queue_config.setdefault("enabled", classify_config["enabled"])
elements.append(queue_properties(queue_config,
model,
system_info))
elements.append(inference_properties(classify_config,
model,
model_name,
system_info))
if (elements):
result = " ! ".join([element for element in elements if element])
return result
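
# Hedged usage sketch (paths, caps string and model name below are
# illustrative assumptions; real values come from the surrounding task
# framework and its runner configuration):
#
#   channel = Channel(runner_config={}, model_root="models", system_info={},
#                     caps="video/x-raw", channel_number=0)
#   channel.add_src_element({"source": "input.mp4"})
#   channel.add_caps_element("video/x-raw")
#   channel.add_decode_element()
#   channel.add_detect_element("person-detection-retail-0013")
#   channel.add_fakesink_element()
#   print(channel.get_channel_standalone_pipeline())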
|
import requests
from bs4 import BeautifulSoup
link = input("Pass a link to a wikipedia page: ")
html = requests.get(link).text
soup = BeautifulSoup(html, "html.parser")
names = []
# print(soup.find_all("li"))
'''The next part is really specific to this web page
as I needed all car companies in a list'''
'''I have used this link https://pl.wikipedia.org/wiki/Kategoria:Marki_samochod%C3%B3w'''
soup = soup.find("div",class_="mw-category")
for x in soup.find_all("li"):
try:
name = x.text
# if " (marka samochodów)" in name:
# name = name.replace(" (marka samochodów)","")
# elif " (motoryzacja)" in name:
# name = name.replace(" (motoryzacja)","")
# elif " (przedsiębiorstwo)" in name:
# name = name.replace(" (przedsiębiorstwo)","")
names.append(name)
except Exception:
pass
print(names)
with open("test.txt",'w',encoding = 'utf-8') as f:
for name in names:
f.write(name + "\n") |
from cloudman.utils.logger import log
from cloudman.gcp.utils import run, derive_names
from cloudman.utils.misc import attr
def list_instances(name_only=True):
"""Get list of instances in current project"""
vms = run('compute instances list')
return [str(vm['name']) for vm in vms] if name_only else vms
def has_instance(name):
"""Check if an instance with given name exists"""
vms = list_instances()
return name in vms
def get_instance_ip(name):
"""Get the external IP for an instance"""
vms = list_instances(name_only=False)
for vm in vms:
if vm['name'] == name:
return vm["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
def delete_instance(name):
"""Delete an instance"""
log("Removing instance '" + name + "'. This may take a while...", prefix=True)
return run('compute instances delete ' + name + ' --zone=us-west1-b -q')
def create_instance(name, machine, gpu, gpucount=1, spot=True):
"""Create an instance for the given boot disk"""
log("Starting an instance for '" + name +
"' with machine type '" + machine + "' and GPU type '" + gpu + "'")
# Network, firewall & boot instance name
network, _, boot = derive_names(name)
# GPU config
if gpu == 'nogpu':
gpu_arg = ''
else:
gpu_arg = '--accelerator="type={0},count={1}"'.format(gpu, gpucount)
# Preemptible config
spot_arg = '--preemptible' if spot else ''
# Construct & run the command
cmd = """compute instances create {0} \
--subnet={1} \
--network-tier=PREMIUM \
--zone=us-west1-b \
--machine-type={2} \
{3} \
--no-restart-on-failure \
--maintenance-policy=TERMINATE \
--disk=name={4},device-name={5},mode=rw,boot=yes \
{6} \
""".format(name, network, machine, gpu_arg, boot, boot, spot_arg)
return run(cmd)
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:ft=python
#
# Author: R.F. Smith <[email protected]>
# Created: 2011-12-28T14:54:23+01:00
# Last modified: 2020-10-30T05:08:36+0100
#
"""Pull the current git-managed directory from another server and rebase around that."""
import argparse
import json
import logging
import os
import subprocess
import sys
__version__ = "2020.10.30"
def main():
"""Entry point for pull-git."""
args = setup()
srvname = getremote(args.server)
gdir = getpulldir()
arglist = [
"git",
"pull",
"-X",
"theirs",
"--rebase",
f"git://{srvname}/{gdir}",
]
cmd = " ".join(arglist)
logging.info(f'Using command: "{cmd}"')
subprocess.run(arglist)
def setup():
"""Process command-line arguments."""
opts = argparse.ArgumentParser(prog="open", description=__doc__)
opts.add_argument("-v", "--version", action="version", version=__version__)
opts.add_argument(
"-s",
"--server",
default="",
help="remote server to use (overrides ~/.pull-gitrc)",
)
opts.add_argument(
"--log",
default="warning",
choices=["debug", "info", "warning", "error"],
help="logging level (defaults to 'warning')",
)
args = opts.parse_args(sys.argv[1:])
logging.basicConfig(
level=getattr(logging, args.log.upper(), None),
format="%(levelname)s: %(message)s",
)
logging.info(f"command line arguments = {sys.argv}")
logging.info(f"parsed arguments = {args}")
return args
def getremote(override):
"""Get the remote server from ~/.pull-gitrc or the command line.
Verify that the remote is reachable, or quit.
The contents of ~/.pull-gitrc should look like this:
{"remote": "foo.bar.home"}
"""
if not override:
rcpath = os.environ["HOME"] + os.sep + ".pull-gitrc"
try:
with open(rcpath) as rcfile:
config = json.load(rcfile)
except FileNotFoundError:
logging.error("file ~/.pull-gitrc not found and no server provided.")
sys.exit(1)
remote = config["remote"]
logging.info(f"using remote '{remote}' from configuration file.")
else:
remote = override
logging.info(f"using remote '{remote}' from command-line.")
rv = subprocess.run(
["ping", "-c", "1", remote],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
if rv.returncode == 0:
return remote
logging.error(f"remote {remote} cannot be reached by ICMP echo request.")
sys.exit(2)
def getpulldir():
"""Get the name of the directory we're pulling."""
hdir = os.environ["HOME"] + os.sep
curdir = os.getcwd()
if not curdir.startswith(hdir):
logging.error("current directory not in user's home directory; exiting.")
sys.exit(3)
gdir = curdir[len(hdir) :]
return gdir
if __name__ == "__main__":
main()
|
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_vpnaas.services.vpn import vpn_service
class VyattaVPNService(vpn_service.VPNService):
"""Vyatta VPN Service handler."""
def __init__(self, l3_agent):
"""Creates a Vyatta VPN Service instance.
NOTE: Directly accessing l3_agent here is an interim solution
until we move to have a router object given down to device drivers
to access router related methods
"""
super(VyattaVPNService, self).__init__(l3_agent)
self.l3_agent = l3_agent
def get_router_client(self, router_id):
"""
Get Router RESTapi client
"""
return self.l3_agent.get_router_client(router_id)
def get_router(self, router_id):
"""
Get Router Object
"""
return self.l3_agent.get_router(router_id)
|
"""
This code gives a simple example of how to create screen shot using two different methods.
The first method hides all objects in the render view except for the object that
is provided in the argument, takes a screen shot, then brings back all those objects
that were visible.
The second method creates a new template and render view. It imports the object from
the previous render view into the newly created one and transfers over the display properties
to the new render view (this part still has issues), takes a screenshot, then deletes
the new template and renderview.
# option 2 create a new renderView
# input option and
# https://www.paraview.org/Wiki/ParaView/Python_Scripting
# https://discourse.paraview.org/t/feature-request-clone-renderview/2370/3
# https://docs.paraview.org/en/latest/UsersGuide/displayingData.html
"""
import paraview.simple as pvs
import os
def sphere(r, representation, renderView=None, fileName=None):
sphere = pvs.Sphere()
sphere.Radius = r
if not renderView:
renderView = pvs.GetActiveViewOrCreate('RenderView')
sphereDisplay = pvs.Show(sphere, renderView)
sphereDisplay.Representation = representation
if False:
screenshot_hide_show(object=sphere, renderView=renderView, fileName=fileName)
if True:
screenshot_new_renderView(object=sphere, renderView=renderView, fileName=fileName)
def screenshot_hide_show(renderView=None, object=None, fileName=None, tempCamera=None):
visible_objs = []
originalCam = pvs.GetActiveCamera()
# option 1 hide all the objects except for the current one
for name_id, pObject in pvs.GetSources().items():
        objDisplay = pvs.GetRepresentation(proxy=pObject, view=renderView)
        if objDisplay.Visibility:
visible_objs.append(pObject)
if pObject != object:
pvs.Hide(pObject)
if not tempCamera:
        renderView.ResetCamera()
tempCamera = pvs.GetActiveCamera()
tempCamera.Azimuth(30)
tempCamera.Elevation(30)
pvs.SaveScreenshot(fileName, renderView, ImageResolution=[1800, 1220])
    tempCamera.SetPosition(originalCam.GetPosition())
    tempCamera.SetFocalPoint(originalCam.GetFocalPoint())
for pObject in visible_objs:
pvs.Show(pObject, renderView)
# pvs.ShowAll()
def screenshot_new_renderView(object=None, renderView=None, fileName=None, camera=None):
# option 2 create a new renderView
# input option and
# https://www.paraview.org/Wiki/ParaView/Python_Scripting
# https://discourse.paraview.org/t/feature-request-clone-renderview/2370/3
# https://docs.paraview.org/en/latest/UsersGuide/displayingData.html
if not renderView:
renderView = pvs.GetActiveViewOrCreate('RenderView')
objDisplay = pvs.GetRepresentation(proxy=object, view=renderView) # with this we don't need to pass around objectDisplay variable
tempLayout = pvs.CreateLayout('Temp Layout')
tempRenderView = pvs.CreateRenderView()
pvs.AssignViewToLayout(view=tempRenderView, layout=tempLayout, hint=0)
pvs.SetActiveView(tempRenderView)
pvs.SetActiveSource(object)
# show data in view
tempObjDisplay = pvs.Show(object, tempRenderView)
for property in objDisplay.ListProperties():
# print('\n')
# print(property)
# print(display.GetPropertyValue(property))
# RepresentationTypesInfo gives a Runtime Error message. this is a list of strings
# BlockColor and Block Opacity both give attribute error. they are blank {}
# ColorArrayName produces TypeError: SetElement argument 2: string or None required. this gives [None, ''] might have to use color transfer function
# OpacityArray produces TypeError: SetElement argument 2: string or None required. this gives ['POINTS', 'Normals']
        # SetScaleArray produces TypeError: SetElement argument 2: string or None required. this gives ['POINTS', 'Normals']
problems = ['RepresentationTypesInfo','BlockColor', 'BlockOpacity', 'ColorArrayName','OpacityArray',
'SetScaleArray']
# do try except to catch the problems
if property not in problems:
try:
tempObjDisplay.SetPropertyWithName(property, objDisplay.GetPropertyValue(property))
            except Exception:
                # Some display properties cannot be copied directly; skip them.
                pass
pvs.Show()
if not camera:
tempRenderView.ResetCamera()
tempCamera = pvs.GetActiveCamera()
tempCamera.Azimuth(30) # Horizontal rotation
tempCamera.Elevation(30) # Vertical rotation
else:
tempCamera = pvs.GetActiveCamera()
tempCamera.SetPosition(camera.GetPosition())
        tempCamera.SetFocalPoint(camera.GetFocalPoint())
    fileName = fileName.replace(' ', '_').replace(':', '')
pvs.SaveScreenshot(fileName, tempRenderView, ImageResolution=[1800, 1220])
# # destroy temps
pvs.Delete(tempRenderView)
del tempRenderView
pvs.RemoveLayout(tempLayout)
del tempLayout
pvs.SetActiveView(renderView)
r = 2
representation = 'Surface With Edges'
fileName = os.path.join(os.getcwd(),'testimg.png')
sphere(r, representation, fileName=fileName)
if False:
# trace generated using paraview version 5.7.0
#
# To ensure correct image size when batch processing, please search
# for and uncomment the line `# renderView*.ViewSize = [*,*]`
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# create a new 'Sphere'
sphere1 = Sphere()
sphere1.Radius = 2.0
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
sphere1Display = Show(sphere1, renderView1)
sphere1Display.Representation = 'Surface'
# reset view to fit data
renderView1.ResetCamera()
# get the material library - not sure what the materials library is
materialLibrary1 = GetMaterialLibrary()
renderView1.Update()
sphere1Display.SetRepresentationType('Surface With Edges')
# get camera animation track for the view
cameraAnimationCue1 = GetCameraTrack(view=renderView1)
# current camera placement for renderView1
renderView1.CameraPosition = [0.0, 12.0, 14.3577]
renderView1.CameraParallelScale = 3.73729
# save screenshot
SaveScreenshot('/Users/Angel/Desktop/pic.png', renderView1, ImageResolution=[1112, 1220],
# PNG options
CompressionLevel='4')
#### saving camera placements for all active views
# current camera placement for renderView1
renderView1.CameraPosition = [0.0, 12.0, 14.3577]
renderView1.CameraParallelScale = 3.73729
if False:
# trace generated using paraview version 5.7.0
#
# To ensure correct image size when batch processing, please search
# for and uncomment the line `# renderView*.ViewSize = [*,*]`
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# create a new 'Sphere'
sphere1 = Sphere()
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
sphere1Display = Show(sphere1, renderView1)
sphere1Display.Representation = 'Surface'
renderView1.ResetCamera()
# get the material library
materialLibrary1 = GetMaterialLibrary()
# update the view to ensure updated data information
renderView1.Update()
CreateLayout('Layout #2')
# set active view
SetActiveView(None)
# Create a new 'Render View'
renderView2 = CreateView('RenderView')
renderView2.OSPRayMaterialLibrary = materialLibrary1
# get layout
layout2 = GetLayoutByName("Layout #2")
# assign view to a particular cell in the layout
AssignViewToLayout(view=renderView2, layout=layout2, hint=0)
# set active source
SetActiveSource(sphere1)
# show data in view
sphere1Display_1 = Show(sphere1, renderView2)
# trace defaults for the display properties.
sphere1Display_1.Representation = 'Surface'
# reset view to fit data
renderView2.ResetCamera()
# Properties modified on sphere1
sphere1.Radius = 1.0
# update the view to ensure updated data information
renderView1.Update()
# update the view to ensure updated data information
renderView2.Update()
# destroy renderView2
Delete(renderView2)
del renderView2
RemoveLayout(layout2)
# Properties modified on sphere1
sphere1.Radius = 0.75
# update the view to ensure updated data information
renderView1.Update()
#### saving camera placements for all active views
# current camera placement for renderView1
renderView1.CameraPosition = [0.0, 0.0, 3.2903743041222895]
renderView1.CameraParallelScale = 0.8516115354228021
#### uncomment the following to render all views
# RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
if False:
from magnetovis import objects
figure_path = '/Users/weigel/git/magnetovis/docs/figures/'
renderView, obj = mvs.cutplane()
# https://kitware.github.io/paraview-docs/latest/python/paraview.simple.html#paraview.simple.SaveScreenshot
    SaveScreenshot(os.path.join(figure_path, 'magnetovis_demo2_cutplane.png'),
                   renderView, ImageResolution=[2648, 2354])
renderView, obj = mvs.axes()
# https://kitware.github.io/paraview-docs/latest/python/paraview.simple.html#paraview.simple.SaveScreenshot
    SaveScreenshot(os.path.join(figure_path, 'magnetovis_demo2_axes.png'),
                   renderView, ImageResolution=[2648, 2354])
if False:
debug = False
time = [2000, 1, 1, 0, 0, 0]
from magnetovis import objects
mvs.earth(time, debug=debug)
mvs.earth(time, show=False, debug=debug)
import paraview.simple as pvs
renderView = pvs.GetActiveViewOrCreate('RenderView')
sphereDisplay, renderView, sphereVTK = mvs.earth(time, renderView=renderView, show=False, debug=debug)
renderView.CameraViewUp = [0, 1, 0]
renderView.CameraFocalPoint = [0, 0, 0]
renderView.CameraViewAngle = 45
renderView.CameraPosition = [0,0,5]
#pvs.Show(sphereVTK, renderView)
pvs.Show()
#save screenshot
pvs.WriteImage("test.png") |
import torch
from torch import nn as nn
import torch.nn.functional as F
from learning.modules.map_transformer_base import MapTransformerBase
from learning.modules.img_to_img.img_to_features import ImgToFeatures
from visualization import Presenter
from utils.simple_profiler import SimpleProfiler
PROFILE = False
class FPVToFPVMap(nn.Module):
def __init__(self, img_w, img_h, res_channels, map_channels, img_dbg=False):
super(FPVToFPVMap, self).__init__()
self.image_debug = img_dbg
# Provide enough padding so that the map is scaled down by powers of 2.
self.img_to_features = ImgToFeatures(res_channels, map_channels, img_w, img_h)
self.prof = SimpleProfiler(torch_sync=PROFILE, print=PROFILE)
self.actual_images = None
def init_weights(self):
self.img_to_features.init_weights()
def reset(self):
self.actual_images = None
def forward_fpv_features(self, images, sentence_embeds, parent=None):
"""
Compute the first-person image features given the first-person images
If grounding loss is enabled, will also return sentence_embedding conditioned image features
:param images: images to compute features on
:param sentence_embeds: sentence embeddings for each image
:param parent:
:return: features_fpv_vis - the visual features extracted using the ResNet
features_fpv_gnd - the grounded visual features obtained after applying a 1x1 language-conditioned conv
"""
# Extract image features. If they've been precomputed ahead of time, just grab it by the provided index
features_fpv_vis = self.img_to_features(images)
return features_fpv_vis
def forward(self, images, poses, sentence_embeds, parent=None, show=""):
self.prof.tick("out")
features_fpv_vis_only = self.forward_fpv_features(images, sentence_embeds, parent)
return features_fpv_vis_only |
#!/usr/bin/python3
import os
import json
import datetime
import requests
import pprint
def getAccessTokenFromFile():
"""gets the api's access token from ~/.wakaconky
:returns: string
"""
configFile = os.path.expanduser('~/.wakatime.cfg')
try:
c = open(configFile)
except (OSError, IOError) as e:
if e.errno == 2:
return ''
else:
raise e
for line in c:
if 'api_key' in line :
token = line.split(' ')[2]
return token
return ''
def getSummary(token):
"""get the user's summary from wakatime
:returns: json
"""
today = datetime.datetime.now()
startTime = today.strftime('%Y-%m-%d')
params = {'start':startTime, 'end':startTime, 'api_key':token}
route = 'https://wakatime.com/api/v1/users/current/summaries'
return callWakatimeAPI(params, route)
def getStats(token):
"""get the user's stats for the last 7 days
:token: string
:returns: json
"""
timeRange = 'last_7_days'
params = {'api_key':token}
route = 'https://wakatime.com/api/v1/users/current/stats/' + timeRange
return callWakatimeAPI(params, route)
def callWakatimeAPI(params, route):
"""handles the API requests
:params: object
:route: string
:returns: json
"""
headers = {'Accept':'application/x-www-form-urlencoded'}
r = requests.get(route, headers=headers, params=params)
return r.json()
def getWakatimeData(token):
toBeStored = []
summary = getSummary(token)
toBeStored.append('time_spent_today = ' + summary["data"][0]["grand_total"]["text"]) # total
minutes = int(summary["data"][0]["grand_total"]["total_seconds"])/60
percent = minutes/(480/100) # 480 minutes = 8 hours
percent = "{0:.2f}".format(percent)
toBeStored.append('time_spent_today_as_percentage = ' + percent)
toBeStored.append('project_of_the_day = '
+ getTheFirstItem(summary["data"][0]["projects"], "name"))
toBeStored.append('time_on_project_of_the_day = '
+ getTheFirstItem(summary["data"][0]["projects"], "text"))
toBeStored.append('lang_of_the_day = '
+ getTheFirstItem(summary["data"][0]["languages"], "name"))
toBeStored.append('time_on_lang_of_the_day = '
+ getTheFirstItem(summary["data"][0]["languages"], "text"))
last7days = getStats(token)
bestDayInMinutes = int(last7days["data"]["best_day"]["total_seconds"])/60
bestDayInMinutes = "{0:.2f}".format(bestDayInMinutes)
bestDay = str(last7days["data"]["best_day"]["date"]) + ' - ' + str(bestDayInMinutes)
toBeStored.append('best_day = ' + bestDay + ' mins')
weekLang = getTheFirstItem(last7days["data"]["languages"], "name")
weekLang += ' - ' + getTheFirstItem(last7days["data"]["languages"], "text")
toBeStored.append("lang_of_the_week = " + weekLang)
weekProj = getTheFirstItem(last7days["data"]["projects"], "name")
weekProj += ' - ' + getTheFirstItem(last7days["data"]["projects"], "text")
toBeStored.append("project_of_the_week = " + weekProj)
username = last7days["data"]["username"]
toBeStored.append("username = " + username)
appendToWakaconkyData(toBeStored)
def wipeOldData():
"""erases wakaconky.data's content
:returns: boolean
"""
wkdata = os.path.expanduser('~/.wakaconky.data')
try:
open(wkdata, 'w').close()
except (OSError, IOError) as e:
print(e)
return False
return True
def appendToWakaconkyData(toAppend):
dataFile = os.path.expanduser('~/.wakaconky.data')
try:
f = open(dataFile, 'a')
except Exception as e:
raise e
for dataChunk in toAppend:
f.write(dataChunk)
f.write('\n')
f.close()
return True
def getTheFirstItem(l, extract):
if(len(l) == 0):
return 'There is no data yet'
else:
return l[0][extract]
token = getAccessTokenFromFile()
if token == '':
print('api key needed!')
else:
wipeOldData()
getWakatimeData(token)
|
#!/usr/bin/env python
# parse command line options
def cli_opts(argv, inp, call_conv):
import sys, getopt
    def print_ft_exit():
        print(call_conv)
        sys.exit(2)
    try:
        opts, args = getopt.getopt(argv, ':'.join(inp.keys()) + ':')
    except getopt.GetoptError as e:
        # Print the error before exiting; the original exited first, so the
        # message was never shown.
        print(e)
        print_ft_exit()
    except Exception as e:
        print(e)
    if len(opts) != len(inp):
        print('Invalid option count')
print_ft_exit()
out = { }
for opt, arg in opts:
if opt in inp.keys():
if inp[opt][0](arg):
out[opt] = inp[opt][1](arg)
else:
                print('Invalid input type for argument %s' % opt)
print_ft_exit()
else:
            print('No option of form %s' % opt)
print_ft_exit()
return out
if __name__ == '__main__':
    import sys
    import json
from cli_opts import cli_opts
from nhlscrapi._tools import JSONDataEncoder as Encoder
from nhlscrapi import constants as C
from nhlscrapi.games.cumstats import Score, ShotCt, ShotAttemptCt, Corsi, Fenwick
from nhlscrapi.games.game import Game
from nhlscrapi.games.gamekey import GameKey, GameType
# get cli opts
def get_inp_params(args):
# define input parameters and validators
inp = {
'-s': [lambda s: s.isdigit() and int(s) in C.GAME_CT_DICT, lambda s: int(s)],
'-g': [lambda g: g.isdigit(), lambda g: int(g)],
'-r': [lambda r: r.isdigit() and int(r) in [0,1], lambda r: int(r) > 0]
}
call_conv = "gamedata.py -s <season, integer> -g <game_num, integer> -r <reg_season, binary>"
out = cli_opts(args, inp, call_conv)
return out['-s'], out['-g'], out['-r']
# start script
season, game_num, reg_season = get_inp_params(sys.argv[1:])
    if not 1 <= game_num <= C.GAME_CT_DICT[season]:
        print('Invalid game number: %i' % game_num)
        sys.exit(0)
    print(season, game_num, reg_season)
gt = GameType.Regular if reg_season else GameType.Playoffs
gk = GameKey(season, gt, game_num)
cum_stats = {
'Score': Score(),
'Shots': ShotCt(),
'ShotAtt': ShotAttemptCt(),
'Corsi': Corsi(),
'Fenwick': Fenwick()
}
game = Game(gk, cum_stats=cum_stats)
out_f = ''.join(str(x) for x in gk.to_tuple()) + '.json'
with open(out_f, 'w') as f:
# auto computes when using game wrapper
# print 'Final :', game.cum_stats['Score'].total
# print 'Shootout :', game.cum_stats['Score'].shootout.total
# print 'Shots :', game.cum_stats['Shots'].total
# print 'Shot Attempts :', game.cum_stats['ShotAtt'].total
# print 'EV Shot Atts :', game.cum_stats['Corsi'].total
# print 'Corsi :', game.cum_stats['Corsi'].share()
# print 'FW Shot Atts :', game.cum_stats['Fenwick'].total
# print 'Fenwick :', game.cum_stats['Fenwick'].share()
# print '\nRefs :', game.refs
# print 'Linesman :', game.linesman
# print 'Coaches'
# print ' Home :', game.home_coach
# print ' Away :', game.away_coach
#f.write(json.dumps(game, cls=Encoder) + '\n')
from nhlscrapi.scrapr.gamesummrep import GameSummRep
summ = GameSummRep(gk)
print(summ.parse_matchup())
summ.parse_scoring_summary() |
from __future__ import print_function
import os
import sys
from operator import add
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("CreatePackage")\
.getOrCreate()
projects = "s3a://insight-de-data/projects.csv"
project_lines = spark.read.text(projects).rdd.map(lambda r: r[0])\
.map(lambda line: line.split(','))\
.map(lambda x:(x[0],x[1].strip().lower(),x[2],x[7],x[16],x[20]))\
.collect()
# Find the repository for the package
project_path = os.path.join("project_cleaned.csv")
count = 0
with open(project_path, "w", encoding="utf-8") as testFile:
testFile.write('id,platform,name,homepage_url,language,repository_id\n')
for l in project_lines:
v = list(l)
            # data cleaning for language column
if v[4]== None or v[4]==0 or v[4].strip()=="0" or len(v[4].strip())>15:
v[4] = "null"
            # data cleaning for repository_id (handle missing/unqualified values)
if v[5] == None or v[5]==0 or v[5].strip()=="0" or not v[5].strip().isdigit():
v[5] = "null"
if count == 0:
count = 1
else:
testFile.write('%s,%s,%s,%s,%s,%s\n' % (v[0], v[1],v[2], v[3],v[4], v[5]))
spark.stop() |
## Tuple needs less system memory
import sys
import timeit
my_list = [0, 1, 2, "hello", True]
my_tuple = (0, 1, 2, "hello", True)
print(sys.getsizeof(my_list), "bytes")
print(sys.getsizeof(my_tuple), "bytes")
## Tuple is faster to create
print(timeit.timeit(stmt="[0,1,3,4,5]", number=1000000))
print(timeit.timeit(stmt="(0,1,3,4,5)", number=1000000))
|
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJxFVsfO7FgR7nvnDjA8BQsWI3nhbLclBuGcUzt75xzbbqd22PISvC30Lw1QqvpU51Sd0kkqfdntd/nxsX98bFE/kH/0262/3eL/+d9u8bf/+t9v8fdbcbu1P92Kb7f2x634fmt/vuXfb//89rt+0n54H3B+/elTr/n3R4zlX7/cboVyCP5SbKReGWT5fra5ERamJNDCOCp3ete00GsaBM7W6uUQSLZTUZm+M+FoY0aqd4+1VUHuq2Er4c26wOOCQP2sSqdFwfy5LivIgOA2+iUIeGC6iVaAJuQ7BPDYB1DqTqXUZiHgOwjAuaUIYEVB3KVyDq/AMtVeMAiTGPVaQGBATsRtsSLNudkQzuRFk31VM3Uayd0WhgETT4+JdU+dN52gyhp/vtO8zZhoRz56wgoueDkkXzA3iauHetLoiRSYu6SOacDSWw7dczbUJOkiYw1zFb7KFWjsz1TeMKPGI7qxVJrljRl1hJFzLvQ6Ipvjm3BXqsjhgn3WF4oW7EgpV2JjiPD5kmMVjqDQFb1gDkaxqcQFjgDZhPKDGPvXY87IWjcBRmrfaVotzTsZC4JfM8u7k/Q0JufeqsSqE3vQa888NvkpaxGHCt19g98EESDxqjsYj7pemaP02GvrNCe6RtfNRdmKL74PVFPDSmBLOaq5l2gdiTU+Gp0uXijTrJGNjI4geWytnOMWLaj7JPG90iiJ6Wd8oJUWd5DXi9j1iHg5CryUBnjY5sBGlHxkqBeZaP8iQh8B2GB4nXgy+O/Y020PNvYAsi84jodkdd+trweZhl4Ly0t7iCa5Wd3xLm93yfBjce2bwwv9WtJa0j2qxasDJQvMRD8hAhB3vOjUZdvvuZEfbSWtBhu+Z0svXo3NABvPCfrRw+CxEZyfjstozDElSgVJO6CBuJ67MWOHA+H8ZtqIwr0SaQcFlpgSABJFEzNx6i2ztENW9pSYQFuPpjXX2sSc3lPoxS9xlUHRi5YGXx1BPsy1sdWb3dAXxNSeBfa54lcG51JHQxzDm53Var7DqBWjc4xTfx7rHEga2txIIh0AcXOueLoZLqlIQds2LfLy6tkwS8Q+mSkbpmLm064UdpQpseiRbTswBkNl4cwXqSr8iSU3LRVFBE5RQ1FGkxjY9Wx1Ohj86l0GT+sMouAUGJh6GW4eT2pbSrROrXtduLYMhl6FfD6VdR496orbwIx2wlfndA+5a1/jV+l0nyMmHjG7Bn/Pk/JAmilmDQ86hMd4Fux7tylBO/YkNeWrxiFeBHm+mO1J4hR8HCdt8h6uAV/hUhGD2FHQugGS6nHNci8gratTGsxdxeaathGgZoHMkKWZVK5iOzwC7+kX8lrMxaOdE2BYj3er2sgS3dv0CnAZ4R28idzMZ4fRxzIQnkAbXkNvaZze8Uv6hJWCY8rNU92XQyHptKh0DOn3+RxWHmJqZ0uWLqzqyDF8BELEPHjJ3gA57YMd1GMB1B2nlAKAMaKNpWAJMKiaZWLSn9zUqojtR0oxnXrg1NFjDHjdUDeCUOfZlGVp8t1ZYfM4+2yFWGLXNC/syBMkfajayxxAvyaAhgcw0b6M3O3DCytV7AljiRp3sx0sEkFLbtcNPnEJqSk9/LrvTM4RfctkuKc3dYbYu7GumTrElKKTCJAVM6eNZl0AcySNXrjylA7cnpSkIYM4xs3w3TmuWe0+sEXsmzO2Gnn5SEsVtqNDYK5oTtxbim0IMnKlNGadaRfi1Tws+DjUL+wN40IuUjB0Pm2f9sQmPzvzgvT0IDszkQ+4Q7di4kS0vaPaW2VxnUTsQrTcMDCeGe10sO2drpqlg/IsBTfLMfcRDCKXlpNIa5pMqirmzqeH4Evuh0REZy5tIcUmMNRKpgyM5R5utGIflMIZSrjLbhflXzjVLzlVa8gYWV29DrxlPorMcmZpg0KIF5oiwcDU9J98qPQaSXGy3wXCuQutU/b+cfhLPvTpdSxphxBiuJJTmi8xIRyxJ0p7A/qDuksk8kjz2VnyjX1vC+AoT+yep1nE2Pr0CLTxQsuYVJUky+SW6WRlKOUGH+PHRKqDa+otTb4XBn1oe7hkQmDUBxYj/tpL7Tkzlr+RCsIm2TsBt+fIk8ILFcAlQULbsj6LgKsA4qeiGStH9LlKjQysLTguSTrts3ewcBl18NsxbT8d5xkeQnVSeSeltUgweA9RjXiyZbe0YtGGy4sc9saEvGWffLjFm4NBJKMk49m4VCwvRDOUtykx7r0UKEV+roTkofeZQiFdJW1vsT6dN3B6xvPmdXZ9Tn8MVsEYLp9jKh/VUyBX2Kk29qbz76WdaRNysYGGHmUyXaBCQBT66YZ+XR/9eSf3S+NWCmxE/Rn2PUUVOoXJwcIJ1NtfdLCWIyIK+kZ5iVoTjDUaDTLZ0HvomuGOMDjow/vsg2YY3Gl2mzaLQ2S95fVJEFjrgfNAX+SAclwdz8rJSIXc+kjhqaxXrvPLerSerVHX8tHpuh9JY1pk3ThJ7qQIOL0S/GuFcABgZYXmipdk0IhFge9LLAiCAoG7wcfncQchPnVDPjXuEzzOU6L2AKVLOhRXDgKWz9Z9AM87aPop/nwo6I67xgJqA0nNrHmRig4DwBHzIFbiJvJcB5J0w9PfgKiJEApMrR5JSYYqUaArJbBNYfROw5OG7vtvv/36hw9PW//4gWcyL3XSr1+M8OqbdP2KpMlSENj688ftxyRf1j9/0cQiG5+vuViW9ZevHAL7msmLX78I5f9h+Qo626fu/BeZW//0Gf3tOeZbX/z921f4+wf++u0/1x1b8A==")))) |
import random
data = []
for i in range(0, 50):
data.append(random.uniform(0.0, 1.0))
params = {}
params['a'] = 'a'
params['b'] = 'b'
print(params)
s = 'sdasda.csv'
path = s[-4:]
print(path)
min = 0
max = len(data)
for i in range(min, max):
range_count = 0
range_sum = 0.0
for y in range(i-3, i+4):
if y >=0 and y < max:
range_count += 1
range_sum += data[y]
strength = range_sum / range_count
#print(f'{str(i)} - {str(range_count)} - {str(range_sum)} - {str(data[i])} - {str(strength)}')
|
try:
BaseException = BaseException
except NameError:
BaseException = Exception
try:
GeneratorExit = GeneratorExit
except NameError:
class GeneratorExit(Exception):
""" This exception is never raised, it is there to make it possible to
write code compatible with CPython 2.5 even in lower CPython
versions."""
pass
GeneratorExit.__module__ = 'exceptions'
|
"""A library to control a RobertSonics WavTrigger through a serial port
"""
from __future__ import absolute_import, division, print_function
try:
from os import errno
except ImportError:
import errno
import serial
import struct
__version__ = '0.1.2'
__author__ = 'Eberhard Fahle'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Eberhard Fahle'
#Constants for the commands a wavtrigger understands
# Reading data back from a WavTrigger
# Firmware version
_WT_GET_VERSION = bytearray([0xF0,0xAA,0x05,0x01,0x55])
# Number of polyphonic voices and number of tracks on sd-card
_WT_GET_SYS_INFO = bytearray([0xF0,0xAA,0x05,0x02,0x55])
# List of currently playing tracks
_WT_GET_STATUS = bytearray([0xF0,0xAA,0x05,0x07,0x55])
# Timeout when waiting for the data from the Get-Status command
_WT_GET_STATUS_TIMEOUT = 0.25
# Playing individual tracks
_WT_TRACK_SOLO = bytearray([0xF0,0xAA,0x08,0x03,0x00,0x00,0x00,0x55])
_WT_TRACK_PLAY = bytearray([0xF0,0xAA,0x08,0x03,0x01,0x00,0x00,0x55])
_WT_TRACK_PAUSE = bytearray([0xF0,0xAA,0x08,0x03,0x02,0x00,0x00,0x55])
_WT_TRACK_RESUME = bytearray([0xF0,0xAA,0x08,0x03,0x03,0x00,0x00,0x55])
_WT_TRACK_STOP = bytearray([0xF0,0xAA,0x08,0x03,0x04,0x00,0x00,0x55])
_WT_TRACK_LOOP_ON = bytearray([0xF0,0xAA,0x08,0x03,0x05,0x00,0x00,0x55])
_WT_TRACK_LOOP_OFF = bytearray([0xF0,0xAA,0x08,0x03,0x06,0x00,0x00,0x55])
_WT_TRACK_LOAD = bytearray([0xF0,0xAA,0x08,0x03,0x07,0x00,0x00,0x55])
# Stopping and resuming several tracks at once
_WT_STOP_ALL = bytearray([0xF0,0xAA,0x05,0x04,0x55])
_WT_RESUME_ALL = bytearray([0xF0,0xAA,0x05,0x0B,0x55])
# Mixer settings and fader
_WT_VOLUME = bytearray([0xF0,0xAA,0x07,0x05,0x00,0x00,0x55])
_WT_TRACK_VOLUME = bytearray([0xF0,0xAA,0x09,0x08,0x00,0x00,0x00,0x00,0x55])
_WT_FADE = bytearray([0xF0,0xAA,0x0C,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x55])
# Pitch bending
_WT_SAMPLERATE = bytearray([0xF0,0xAA,0x07,0x0C,0x00,0x00,0x55])
# Switching the Power amp on or off (not implemented!)
_WT_AMP_POWER = bytearray([0xF0,0xAA,0x06,0x09,0x00,0x55])
class WavTrigger(object):
"""A controller for a RobertSonics WavTrigger
"""
def __init__(self,device, baud=57600, timeout=5.0):
"""Open a serial port to the device and read the
hardware version and info from the WavTrigger.
:param device: The serial port where the WavTrigger is listening.
:type device: str
:param baud: The baudrate to be used on the port. The value must match
the baudrate set in the init file of the WavTrigger. The default
value (57600) seems to be fast enough for all purposes
:type baud: int
:param timeout: A timeout for reading and writing on the port.
The default (5.0 seconds) is plenty. If this limit is reached
you can be quite sure to have lost the connection.
:type timeout: float
"""
self._wt=serial.Serial(port=device, baudrate=baud)
self._wt.timeout=timeout
if self._wt.isOpen():
self._version=self._getVersion()
self._voices,self._tracks=self._getSysInfo()
def close(self):
"""Closes the port to the WavTrigger. Does not stop playing tracks.
"""
self._wt.close()
def isOpen(self):
"""Test if a serial connection to the WavTrigger is established.
:returns: bool -- True if the device is open, False otherwise
"""
return self._wt.isOpen()
@property
def version(self):
"""Get the version string of the WavTrigger firmeware
:returns: str -- A string with the firmware version that runs on the WavTrigger
"""
return self._version
@property
def voices(self):
"""Get the number of polyphonic voices the WavTrigger can play simultanously.
:returns: int -- The number of voices that can be played simultanously
"""
return self._voices
@property
def tracks(self):
"""Get the number of tracks stored on SD-Card of the WavTrigger.
:returns: int -- The total number of tracks the WavTrigger found on the SD-Card.
"""
return self._tracks
def play(self,track):
"""Play a track
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_PLAY,track)
self._wt.write(t)
def solo(self,track):
"""Play a track solo. Stops all currently playing tracks
and starts the solo track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_SOLO,track)
self._wt.write(t)
def stop(self,track):
"""Stop a playing track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_STOP,track)
self._wt.write(t)
def pause(self,track):
"""Pause a track. Stops a playing track until
'resume' is called for the track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_PAUSE,track)
self._wt.write(t)
def resume(self,track):
"""Resume playing a track that has been paused previously.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_RESUME,track)
self._wt.write(t)
def load(self,track):
"""Load a track into the memory of the WavTrigger and pause it.
The track can then be played using the :meth:`resume` or :meth:`resumeAll` commands
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_LOAD,track)
self._wt.write(t)
def loop(self,track):
"""Set loop flag for a track. When the track is started it is played
in a loop until it is stopped. But stopping it does not clear the loop flag.
If the track is started again, it will still loop. Use :meth:`unLoop` to clear
the loop flag
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
self._wt.write(self._setTrackForCommand(_WT_TRACK_LOOP_ON,track))
def unLoop(self,track):
"""Clear the loop flag for a track. see :meth:`loop`
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
self._wt.write(self._setTrackForCommand(_WT_TRACK_LOOP_OFF,track))
def stopAll(self):
"""Stop all playing tracks.
"""
self._wt.write(_WT_STOP_ALL)
def resumeAll(self):
"""Restart all resumed tracks.
"""
self._wt.write(_WT_RESUME_ALL)
def masterGain(self,gain):
"""
Sets the gain for the WavTrigger output.
:param gain: Gain for the WavTrigger output.
The valid range for the gain argument is -70..+10
:type gain: int
"""
if gain<-70 or gain>10:
raise ValueError('Gain argument range is from -70 to +10')
g=_WT_VOLUME
g[4],g[5]=self._intToLsb(gain)
self._wt.write(g)
def trackGain(self, track, gain):
""" Set the gain for a specific track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param gain: Gain for the WavTrigger output.
The valid range for the gain argument is -70..+10
:type gain: int
"""
if gain<-70 or gain>10:
raise ValueError('Gain argument range is from -70 to +10')
g=_WT_TRACK_VOLUME
g[4],g[5]=self._intToLsb(track)
g[6],g[7]=self._intToLsb(gain)
self._wt.write(g)
def masterVolume(self,volume):
"""Set the volume for the WavTrigger output. This method never
amplifies the signal as the :meth:`masterGain` does when called
with gain values > 0. This prevents distortion in the output signal.
Also most people are used to volume ranges from zero to 100 rather than
a gain value in dB.
:param volume: Volume for the WavTrigger output.
The valid range for the volume argument is 0..100
:type volume: int
"""
vol=_WT_VOLUME
vol[4],vol[5]=self._intToLsb(self._volumeToDb(volume))
self._wt.write(vol)
def trackVolume(self,track,volume):
"""Set the volume for a track. This method never
amplifies the track signal as the :meth:`trackGain` does when called
with gain values > 0. This prevents distortion in the output signal.
Also most people are used to volume ranges from zero to 100 rather than
a gain value in dB.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param volume: Volume for the track.
The valid range for the volume argument is 0..100
:type volume: int
"""
tvol=_WT_TRACK_VOLUME
tvol[4],tvol[5]=self._intToLsb(track)
tvol[6],tvol[7]=self._intToLsb(self._volumeToDb(volume))
self._wt.write(tvol)
def pitch(self,offset):
"""Set an offset for the samplerate that the WavTrigger uses.
A negative offset lowers the tone, a positive offset raises it.
:param offset: Offset to the samplerate.
The range of valid offset argument values is -32767..+32767
:type offset: int
"""
if offset>32767 :
offset=32767
if offset < -32767:
offset = -32767
pitch=_WT_SAMPLERATE
pitch[4],pitch[5]=self._intToLsb(offset)
self._wt.write(pitch)
def fade(self,track,volume,time):
"""Fade the track volume from the current volume level to
a lower or higher volume.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param volume: The target volume for the track.
The valid range for the volume argument is 0..100
:type volume: int
:param time: The time in milliseconds for the fade from the current
to the target level
:type time: int
"""
f=_WT_FADE
f[4],f[5]=self._intToLsb(track)
f[6],f[7]=self._intToLsb(self._volumeToDb(volume))
f[8],f[9]=self._intToLsb(time)
f[10]=0x00
self._wt.write(f)
def fadeOut(self,track, time):
"""Fade the track volume from the current volume level to zero,
then stop the track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param time: The time in milliseconds for the fade out from the current
to silence
:type time: int
"""
f=_WT_FADE
f[4],f[5]=self._intToLsb(track)
f[6],f[7]=self._intToLsb(self._volumeToDb(0))
f[8],f[9]=self._intToLsb(time)
f[10]=0x01
self._wt.write(f)
def playing(self):
"""
Get a list of the currently playing tracks on the WavTrigger.
:returns: list -- A list with the track numbers currently playing.
If no tracks are playing the empty list is returned.
If there is a problem reading the return value from the
WavTrigger `None` is returned.
"""
self._wt.write(_WT_GET_STATUS)
header=self._readFromPort(4)
if header[:2]!=b'\xF0\xAA' or header[3:4]!=b'\x83':
self._wt.flushInput()
return None
trackLen=ord(header[2:3])-4
t=self._readFromPort(trackLen)
if t[-1:]!=b'\x55':
return None
t=t[:-1]
tracks=[t[i:i+2] for i in range(0, len(t), 2)]
trackList=[]
for i in range(len(tracks)):
trackList.append(self._lsbToInt(tracks[i]))
return trackList
def amplifierOn(self):
"""Switch the on-board amplifier on.
"""
data=_WT_AMP_POWER
data[4]=0x01
self._wt.write(data)
def amplifierOff(self):
"""Switch the on-board amplifier off.
"""
data=_WT_AMP_POWER
data[4]=0x00
self._wt.write(data)
def _isValidTrackNumber(self,track):
"""Simple test for valid track numbers
"""
if track>0:
return True
return False
def _lsbToInt(self,lsbValue):
"""Convert track number from 2 bytes in lsb order to an int value
"""
return struct.unpack('<h',lsbValue)[0]
def _intToLsb(self,value):
"""Convert an int value to a 2 byte tuple in lsb order
"""
return (value & 0xFF, (value >> 8) & 0xFF)
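# Illustrative round trip for the two helpers above (example values only):
#   self._intToLsb(300)         -> (0x2C, 0x01)   # 300 == 0x012C, LSB first
#   self._lsbToInt(b'\x2c\x01') -> 300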
def _setTrackForCommand(self,cmd,track):
"""All track commands need a track argument in the data sent
to the WavTrigger. We update the command data array in place
"""
cmd[5],cmd[6]=self._intToLsb(track)
return cmd
def _volumeToDb(self, vol):
"""Map a volume level of 0..100 to the gain level of -70..0 db
which is used by the WavTrigger
"""
if vol<0 or vol>100:
raise ValueError('Volume argument range is from 0 to 100')
return -70+int(vol/1.428)
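# Illustrative values for the mapping above: _volumeToDb(0) -> -70 (silence),
# _volumeToDb(50) -> -35, _volumeToDb(100) -> 0 (unity gain).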
def _getVersion(self):
"""Read version number from device
"""
if(self._wt.write(_WT_GET_VERSION) != len(_WT_GET_VERSION)):
return ''
v=self._readFromPort(25)
if(v[:4]!=b'\xF0\xAA\x19\x81' or v[-1:]!=b'\x55'):
return ''
vstr=v[4:-1].decode('utf8')
return vstr.strip()
def _getSysInfo(self):
"""Read system info from device. The current firmware reports
the number of polyphonic voices and the number of tracks found on the SD-card.
"""
if(self._wt.write(_WT_GET_SYS_INFO) != len(_WT_GET_SYS_INFO)):
return (0,0)
v=self._readFromPort(8)
if(v[:4]!=b'\xF0\xAA\x08\x82' or v[-1:]!=b'\x55'):
return (0,0)
return (ord(v[4:5]),self._lsbToInt(v[5:7]))
def _readFromPort(self, size):
"""Read data from the serial port. If the length of the data returned from
the WavTrigger does not match the requested size an OSError is raised
for the timeout condition.
"""
result=self._wt.read(size)
if len(result) != size:
raise OSError(errno.ETIMEDOUT, "Connection timed out")
return result
def __del__(self):
"""Close the serial port if the class instance goes out of scope.
"""
self.close()
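# Minimal usage sketch (illustrative only): the serial device name below is an
# assumption, and the demo is guarded so that importing this module stays
# side-effect free.
if __name__ == '__main__':
    wt = WavTrigger('/dev/ttyUSB0')
    if wt.isOpen():
        print('firmware:', wt.version, 'voices:', wt.voices, 'tracks:', wt.tracks)
        wt.masterVolume(50)
        wt.play(1)
        print('playing:', wt.playing())
        wt.stopAll()
        wt.close()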
|
"""Read the lib input from a file."""
from abc import abstractmethod
from collections.abc import Iterable
from contextlib import AbstractContextManager
# pylint: disable=too-few-public-methods
class BaseReader(AbstractContextManager, Iterable):
"""Read the lib input from a file."""
def __init__(self, args):
"""Build the reader."""
self.args = args
self.index = 0
@abstractmethod
def __iter__(self):
"""Iterate thru the input file."""
|
def getprimes(n):
if (not str(n).isdigit()) or int(n) < 2:
return set()
elif int(n) == 2:
return {2}
n = int(n)
primes = {2}
for i in range(3, n):
flag = False
for p in primes:
if i % p == 0:
flag = True
break
if not flag:
primes.add(i)
return primes
if __name__ == '__main__':
test = [-43, 2, 3, 73, 1000]
for t in test:
print("getprimes({0}) = {1}".format(str(t), getprimes(t)))
'''
getprimes(-43) = set()
getprimes(2) = {2}
getprimes(3) = {2}
getprimes(73) = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71}
getprimes(1000) = {2, 3, 5, 7, 521, 11, 523, 13, 17, 19, 23, 29, 541, 31, 547, 37, 41, 43, 557, 47, 563, 53, 569, 59, 571, 61, 577, 67, 71, 73, 587, 79, 593, 83, 599, 89, 601, 607, 97, 101, 613, 103, 617, 107, 619, 109, 113, 631, 127, 641, 131, 643, 647, 137, 139, 653, 659, 149, 661, 151, 157, 673, 163, 677, 167, 683, 173, 179, 691, 181, 701, 191, 193, 197, 709, 199, 719, 211, 727, 733, 223, 227, 739, 229, 743, 233, 239, 751, 241, 757, 761, 251, 257, 769, 773, 263, 269, 271, 787, 277, 281, 283, 797, 293, 809, 811, 307, 821, 311, 823, 313, 827, 317, 829, 839, 331, 337, 853, 857, 347, 859, 349, 863, 353, 359, 877, 367, 881, 883, 373, 887, 379, 383, 389, 907, 397, 911, 401, 919, 409, 929, 419, 421, 937, 941, 431, 433, 947, 439, 953, 443, 449, 967, 457, 971, 461, 463, 977, 467, 983, 479, 991, 997, 487, 491, 499, 503, 509}
'''
|
import warnings
import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from torchvision.transforms import transforms
from dataset import dataset
from dods_cats_classification.model.RestNet18 import ResNet18
def denormalize(x_hat):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
mean = torch.tensor(mean).unsqueeze(1).unsqueeze(1)
std = torch.tensor(std).unsqueeze(1).unsqueeze(1)
x = x_hat * std + mean
return x
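# denormalize() inverts the channel-wise Normalize(mean, std) applied in the
# transform pipeline below: x = x_hat * std + mean, with mean and std reshaped
# to (3, 1, 1) so they broadcast over the height and width dimensions.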
warnings.filterwarnings("ignore")
device = torch.device('cuda:0')
weights_path = 'ResNet18-Epoch_10-loss1.358-val acc_0.8232.pth'
model = ResNet18(5).to(device)
print('load weights...')
model.load_state_dict(torch.load(weights_path))
dataset_path = 'pokemon_5'
data = dataset(dataset_path, 224, 'test')
tf = transforms.Compose([
lambda x: Image.open(x).convert('RGB'),
transforms.Resize((int(data.resize * 1.25), int(data.resize * 1.25))),
transforms.RandomRotation(15),
transforms.CenterCrop(data.resize),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
plt.axis('off')
while True:
img = input('Enter the image path: ')
try:
image = tf(img).cuda()
image = torch.unsqueeze(image, dim=0)
except:
print('Failed to open the image!')
continue
else:
logics = model(image)
predict = logics.argmax(dim=1)
image = torch.squeeze(image, dim=0)
image = denormalize(image.cpu())
image = image.detach().numpy()
image = np.transpose(image, [1, 2, 0])
plt.imshow(image)
class_name = data.name[predict[0]]
confidence = torch.softmax(logics[0], dim=0)[predict[0]]
plt.text(0, -10, f'class {class_name}, confidence {confidence}', fontsize=18)
plt.show()
|
# __main__.py to make module executable via python -m
from .igloader import main
main() |
from flask import Flask, request
from structs import *
from pathFinder import PathFinder
import json
import numpy
import sys
from gameHelper import *
from GameSession import *
app = Flask(__name__)
gameSession = GameSession()
def deserialize_map(serialized_map):
"""
Utility function to parse the serialized map
"""
serialized_map = serialized_map[1:]
rows = serialized_map.split('[')
column = rows[0].split('{')
deserialized_map = [[Tile() for x in range(20)] for y in range(20)]
for i in range(len(rows) - 1):
column = rows[i + 1].split('{')
for j in range(len(column) - 1):
infos = column[j + 1].split(',')
end_index = infos[2].find('}')
content = int(infos[0])
x = int(infos[1])
y = int(infos[2][:end_index])
deserialized_map[i][j] = Tile(content, x, y)
return deserialized_map
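# Illustrative (hypothetical) fragment of the serialized map the parser above
# expects: the first character is skipped, '[' starts a row and each '{' starts
# a tile encoded as "content,x,y}", e.g.
#   "x[{0,0,0},{1,0,1}[{0,1,0},{2,1,1}"
# yields Tile(0,0,0) and Tile(1,0,1) in row 0, and Tile(0,1,0) and Tile(2,1,1)
# in row 1 of the 20x20 grid.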
def bot():
"""
Main logic of your bot.
"""
map_json = request.form["map"]
# Player info
encoded_map = map_json.encode()
map_json = json.loads(encoded_map)
p = map_json["Player"]
pos = p["Position"]
x = pos["X"]
y = pos["Y"]
house = p["HouseLocation"]
player = Player(p["Health"],
p["MaxHealth"],
Point(x,y),
Point(house["X"], house["Y"]),
p["Score"],
p["CarriedResources"],
p["CarryingCapacity"])
# Map
serialized_map = map_json["CustomSerializedMap"]
deserialized_map = deserialize_map(serialized_map)
otherPlayers = []
for players in map_json["OtherPlayers"]:
player_info = players["Value"]
p_pos = player_info["Position"]
player_info = PlayerInfo(player_info["Health"],
player_info["MaxHealth"],
Point(p_pos["X"], p_pos["Y"]))
otherPlayers.append(player_info)
# Update Game session
gameSession.playerSession.updateServerData(player, deserialized_map)
gameSession.updateTurnData(player, deserialized_map)
# Update state / Do action
coco = gameSession.playerSession
actionToDo = coco.state.doAction()
print_game(gameSession)
return actionToDo
@app.route("/", methods=["POST"])
def reponse():
"""
Entry point called by the GameServer
"""
return bot()
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 17:04:29 2020
@author: becker
"""
import pygmsh
rect = [-2, 2, -2, 2]
recth = [-1, 1, -1, 1]
h=2
with pygmsh.geo.Geometry() as geom:
hole = geom.add_rectangle(*recth, z=0, mesh_size=h, make_surface=True)
geom.add_physical(hole.surface, label="222")
p = geom.add_rectangle(*rect, z=0, mesh_size=h, holes=[hole])
geom.add_physical(p.surface, label="111")
for i in range(len(p.lines)): geom.add_physical(p.lines[i], label=f"{1000+i}")
mesh = geom.generate_mesh()
print(mesh.cells[0][1].T)
print(mesh.cell_sets.keys())
print([ mesh.cell_sets[f"{1000+i}"][0] for i in range(4)] )
from simfempy.meshes.plotmesh import plotmeshWithNumbering
plotmeshWithNumbering(mesh)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import time
import sys, getopt
#import csv
#import requests
import re
#import hashlib
import json
import datetime
import hashlib
from datetime import timedelta, date
#import threading
#####################
processoutput = os.popen("ps -A -L -F").read()
cur_script = os.path.basename(__file__)
res = re.findall(cur_script,processoutput)
if len(res)>2:
print ("EXITING BECAUSE ALREADY RUNNING.\n\n")
exit(0)
#####################
sys.path.append("modules")
sys.path.append("scripts")
sys.path.append("/usr/local/lib/python3.5/dist-packages")
from Master import Master
from Booking import Booking
obj_master = Master()
html_dir_path = obj_master.obj_config.html_dir_path
def getDateTimeObject(date_str):
datetime_object = datetime.datetime.strptime(date_str, '%Y-%m-%d')
return datetime_object
def getLastDateObject():
current_date = datetime.datetime.now().date()
last_date = current_date - timedelta(days=1) # decrease day one by one
last_date_object = getDateTimeObject(str(last_date))
return last_date_object
if __name__ == '__main__':
dict_stats = {}
# config_rows = obj_master.obj_mongo_db.recSelect('config')
# for config_row in config_rows:
# if 'parsing_interval' in config_row and config_row['parsing_interval']:
# parsing_interval = int(config_row['parsing_interval'])
# date_before_time_interval = datetime.datetime.now() - timedelta(days=parsing_interval)
# dict_stats['property_pending'] = obj_master.obj_mongo_db.getCount('property_urls',None,{'is_active':1,'updated_at':{ '$lt': date_before_time_interval }})
##########################################
dict_stats['property_pending'] = 0
property_url_rows = obj_master.obj_mongo_db.recSelect('property_urls',None,{'is_active':1},1000,'updated_at','ASC')
for property_url_row in property_url_rows:
if 'parse_interval' in property_url_row:
parse_interval = int(property_url_row['parse_interval'])
date_time_interval = datetime.datetime.now() - timedelta(days=parse_interval)
print("parse_interval:"+str(parse_interval)+" udpate_ts:"+str(date_time_interval))
if property_url_row['updated_at'] <= date_time_interval:
dict_stats['property_pending'] = dict_stats['property_pending']+1
##########################################
last_date_obj = getLastDateObject()
current_date_obj = getDateTimeObject(str(datetime.datetime.now().date()))
dict_stats['date'] = last_date_obj
dict_where = { 'updated_at':{ '$gte': last_date_obj , '$lt': current_date_obj } }
dict_stats['property_parsed'] = obj_master.obj_mongo_db.getCount('property_urls',None,dict_where)
dict_stats['hotel_parsed'] = obj_master.obj_mongo_db.getCount('hotel_master',None,dict_where)
dict_stats['price_parsed'] = obj_master.obj_mongo_db.getCount('prices',None,dict_where)
dict_stats['room_details_parsed'] = obj_master.obj_mongo_db.getCount('room_details',None,dict_where)
#dict_stats['rooms_availability_parsed'] = obj_master.obj_mongo_db.getCount('rooms_availability',None,dict_where)
dict_stats['total_property'] = obj_master.obj_mongo_db.getCount('property_urls')
dict_stats['active_property'] = obj_master.obj_mongo_db.getCount('property_urls',None,{'is_active':1})
print(str(dict_stats))
exit()
ret_id = obj_master.obj_mongo_db.recInsert( 'stats_booking' , [ dict_stats ] )
print( "\ninserted in stats table. The return id is"+str(ret_id) )
|
# Copyright 2021 The MLX Contributors
#
# SPDX-License-Identifier: Apache-2.0
VERSION = "0.1.29-elyra-notebook-update"
|
# Author:Sunny Liu
from django.shortcuts import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect
from django.views import View
from urmovie import models
from django.views.decorators.csrf import csrf_exempt
'''
@id : 1
@name : index
@author : 刘旭阳
@date : 2018.3.12
@describe : render the home page
'''
def index(request):
return render(request,'index.html')
def movie_class(request):
result = models.Movie.objects.all()[:20]
cate_list = models.Category.objects.all()
age_list=[2018,2017,2016,2015,2014,2013,2012,2011,2010,2009]
return render(request,'movie_category.html',{"movie":result,"cate":cate_list,"age":age_list})
def actor_class(request):
result = models.Actor.objects.all()[:20]
return render(request,'actor_category.html',{"actor":result})
def contact(request):
return render(request,'contact.html') |
"""
R 1.2
---------------------------------
Problem Statement : Write a short Python function, is_even(k), that takes an integer value and
returns True if k is even, and False otherwise. However, your function
cannot use the multiplication, modulo, or division operators.
Author : Saurabh
"""
def is_even(k):
a = 1
# Using bitwise AND operator
if k & a == 0:
return True
return False
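# The lowest bit of an integer is 1 exactly when the number is odd, so ANDing
# with 1 distinguishes the two cases, e.g. 6 = 0b110 -> 6 & 1 == 0 (even),
# while 5 = 0b101 -> 5 & 1 == 1 (odd).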
if __name__ == "__main__":
print(is_even(6))
print(is_even(5))
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <[email protected]>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.keyboard as keyboard
@skipIf(NO_MOCK, NO_MOCK_REASON)
class KeyboardTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.keyboard
'''
def setup_loader_modules(self):
return {keyboard: {}}
# 'get_sys' function tests: 1
def test_get_sys(self):
'''
Test if it get current system keyboard setting
'''
mock = MagicMock(return_value='X11 Layout=us')
with patch.dict(keyboard.__grains__, {'os_family': 'RedHat'}):
with patch.dict(keyboard.__salt__, {'cmd.run': mock}):
self.assertEqual(keyboard.get_sys(), 'us')
# 'set_sys' function tests: 1
def test_set_sys(self):
'''
Test if it set current system keyboard setting
'''
mock = MagicMock(return_value='us')
with patch.dict(keyboard.__grains__, {'os_family': 'RedHat'}):
with patch.dict(keyboard.__salt__, {'cmd.run': mock}):
with patch.dict(keyboard.__salt__, {'file.sed': MagicMock()}):
self.assertEqual(keyboard.set_sys('us'), 'us')
# 'get_x' function tests: 1
def test_get_x(self):
'''
Test if it get current X keyboard setting
'''
mock = MagicMock(return_value='layout: us')
with patch.dict(keyboard.__salt__, {'cmd.run': mock}):
self.assertEqual(keyboard.get_x(), 'us')
# 'set_x' function tests: 1
def test_set_x(self):
'''
Test if it set current X keyboard setting
'''
mock = MagicMock(return_value='us')
with patch.dict(keyboard.__salt__, {'cmd.run': mock}):
self.assertEqual(keyboard.set_x('us'), 'us')
|
#!/usr/bin/python3
"""
.. moduleauthor:: Albert Heinle<[email protected]>
"""
import unittest
from Listing import Listing
import json
class TestProduct(unittest.TestCase):
"""
Testing the class Product for correct functionality
"""
def test_Product_From_Sortable_Challenge(self):
"""
Tests the first Listing entry from the sortable challenge, namely:
"title":"Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)",
"manufacturer":"Fujifilm Canada",
"currency":"CAD",
"price":"499.99"
We test if the Listing is correctly initiated, if all the
getters work properly, the string representation is right
and the JSON representation is right.
"""
title = "Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)"
manufacturer = "Fujifilm Canada"
currency = "CAD"
price = "499.99"
stringRep = """Title: Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)
Manufacturer: Fujifilm Canada
Currency: CAD
Price: 499.99"""
jsonRep = """{"title":"Fujifilm FinePix REAL 3D W3 10 MP Digital Camera with Dual 3x Optical Zoom Lenses (Black)","manufacturer":"Fujifilm Canada","currency":"CAD","price":"499.99"}"""
try:
testListing = Listing(title,manufacturer,currency,price)
except:
self.fail("Could not instanciate valid Listing")
self.assertEqual(title,testListing.getTitle(),
"The title was not stored properly")
self.assertEqual(manufacturer,testListing.getManufacturer(),
"The manufacturer was not stored properly")
self.assertEqual(currency,testListing.getCurrency(),
"The Currency was not stored properly")
self.assertEqual(price,testListing.getPrice(),
"The price was not stored properly")
self.assertEqual(stringRep, str(testListing),
"The string representation was not correct")
self.assertEqual(json.loads(jsonRep),json.loads(testListing.toJSON()),
"The JSON representation was not correct")
def test_Invalid_Types(self):
"""
Tests that Listing rejects invalid types for each
parameter of the constructor.
"""
title = "Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)"
manufacturer = "Fujifilm Canada"
currency = "CAD"
price = "499.99"
testPassed = 1
try:
testListing = Listing(1,manufacturer,currency,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
title")
try:
testListing = Listing(title,1,currency,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
manufacturer")
try:
testListing = Listing(title,manufacturer,1,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
currency")
try:
testListing = Listing(title,manufacturer,currency,1)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
price")
def test_EmptyStrings(self):
"""
Tests that Listing rejects empty strings for each
parameter of the constructor. No entry is optional.
"""
title = "Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)"
manufacturer = "Fujifilm Canada"
currency = "CAD"
price = "499.99"
testPassed = 1
try:
testListing = Listing("",manufacturer,currency,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide an empty string to\
title")
# try:
# testListing = Listing(title,"",currency,price)
# testPassed = 0
# except:
# pass
# if not testPassed:
# self.fail("It was possible to provide an empty string to\
# manufacturer")
try:
testListing = Listing(title,manufacturer,"",price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide an empty string to\
currency")
try:
testListing = Listing(title,manufacturer,currency,"")
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide an empty string to\
price")
|
"""
hon.parsing.markdown.mistune
~~~~~
"""
from ..parser import Parser
class MistuneParser(Parser):
"""A markdown parser implementing the Mistune markdown library.
"""
def parse_front_matter(self):
pass
def parse(self, text):
pass
|
"""
Find the order of the characters in an alien dictionary.
The time complexity of the algorithm is:
1. creating graph
O(n + alphabet_size), where n is total number of words in dictionary
and alphabet_size is the number of alphabets
2. topological sort, complexity: O(V + E)
And in our case it is O(n + alphabet_size)
"""
def create_graph(words):
graph = {}
words_len = len(words)
#Find all the unique char in the dict
unique_char = set()
for w in words:
unique_char = unique_char.union(set(w))
# create a graph with a vertex for each unique char
for ch in unique_char:
graph[ord(ch) - ord('a')]=[]
# Process all the adjacent word pairs and add an edge between vertices where a mismatch exists
for i in range(words_len - 1):
# Take the two words and find the first mismatching char
word1, word2 = words[i],words[i+1]
for j in range(min(len(word1),len(word2))):
# If a mismatch is found, add an edge from the char of word1 to the char of word2
if word1[j] != word2[j]:
numeric_ch1 = ord(word1[j]) - ord('a')
numeric_ch2 = ord(word2[j]) - ord('a')
# Add the edges to the vertex in the graph
graph[numeric_ch1].append(numeric_ch2)
break
return graph
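# Worked example for the sample dictionary word_dict defined at the bottom of
# this file (["baa", "abcd", "abca", "cab", "cad"]): the adjacent-word
# mismatches give the edges b->a, d->a, a->c and b->d, so with a=0, b=1, c=2,
# d=3 the resulting graph is {0: [2], 1: [0, 3], 2: [], 3: [0]}.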
def topological_sort_util(graph,visited,node,stack):
visited[node] = True
if node in graph:
for v in graph[node]:
if not visited[v]:
topological_sort_util(graph,visited,v,stack)
stack.append(node)
def topological_sort(graph):
if len(graph) == 0:
return
# mark all the nodes as not visited
visited = {node: False for node in graph}
stack = []
# Check for all the nodes of the graph
for node in graph.keys():
if not visited[node]:
topological_sort_util(graph,visited,node,stack)
return stack
def find_order(word_dict):
if len(word_dict) <= 1:
print("".join(word_dict))
return
graph = create_graph(word_dict)
ch_order = topological_sort(graph)
while(len(ch_order) != 0):
# convert it to char
to_char = chr(ch_order.pop() + ord('a'))
print(to_char, end= " ")
print()
word_dict = ["baa", "abcd", "abca", "cab", "cad"]
word_dict2 = ['abc']
find_order(word_dict2)
|
def collatz(n):
if n == 1:
return 1
elif n % 2 == 0:
return collatz(n // 2)
else:
return collatz(n * 3 + 1)
num = int(input("Type a number: "))
print("Collatz of %i is %i" % (num, collatz(num))) |
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import sklearn
import pandas as pd
#import scikit-learn
# Imports from this application
from app import app
from joblib import load
pipeline = load("assets/pipeline.joblib")
@app.callback(
Output('prediction-content', 'children'),
[Input('Elevation', 'value'), Input('Continent', 'value')],
)
def predict(year, continent):
df = pd.DataFrame(
columns = ['year', 'continent'],
data = [[year, continent]]
)
y_pred = pipeline.predict(df)[0]
return f'{y_pred:.0f} years'
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Your instructions: How to use your app to get new predictions.
"""
),
html.H2('Predicted IMDB Rating', className = 'mb-5'),
html.Div(id = 'prediction-content', className = 'lead')
],
md=4,
)
column2 = dbc.Col(
[
dcc.Markdown('## Predictions', className = 'mb-5'),
dcc.Markdown('#### Alcohol'),
dcc.Slider(
id='Alcohol',
min=8,
max=15,
step=.5,
value=10,
marks={n: str(n) for n in range(8, 16)},
className='mb-5',
),
dcc.Markdown('#### Alcohol'),
dcc.Dropdown(
id='Alcohol',
options = [
{'label': '15', 'value': '15'},
{'label': '13', 'value': '13'},
{'label': '11', 'value': '11'},
{'label': '9', 'value': '9'},
],
value = '9',
className='mb-5',
),
],
)
layout = dbc.Row([column1, column2])
|
# -*- coding: utf-8 -*-
#pylint: skip-file
import torch
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils_pg import *
from transformer import MultiheadAttention
class WordProbLayer(nn.Module):
def __init__(self, hidden_size, dict_size, device, copy, coverage, dropout):
super(WordProbLayer, self).__init__()
self.hidden_size = hidden_size
self.dict_size = dict_size
self.device = device
self.copy = copy
self.coverage = coverage
self.dropout = dropout
if self.copy:
self.external_attn = MultiheadAttention(self.hidden_size, 1, self.dropout, weights_dropout=False)
self.proj = nn.Linear(self.hidden_size * 3, self.dict_size)
self.v = nn.Parameter(torch.Tensor(1, self.hidden_size * 3))
self.bv = nn.Parameter(torch.Tensor(1))
else:
self.proj = nn.Linear(self.hidden_size, self.dict_size)
self.init_weights()
def init_weights(self):
init_linear_weight(self.proj)
if self.copy:
init_xavier_weight(self.v)
init_bias(self.bv)
def forward(self, h, y_emb=None, memory=None, mask_x=None, xids=None, max_ext_len=None):
if self.copy:
atts, dists = self.external_attn(query=h, key=memory, value=memory, key_padding_mask=mask_x, need_weights = True)
pred = T.softmax(self.proj(T.cat([h, y_emb, atts], -1)), dim=-1)
if max_ext_len > 0:
ext_zeros = Variable(torch.zeros(pred.size(0), pred.size(1), max_ext_len)).to(self.device)
pred = T.cat((pred, ext_zeros), -1)
g = T.sigmoid(F.linear(T.cat([h, y_emb, atts], -1), self.v, self.bv))
xids = xids.transpose(0, 1).unsqueeze(0).repeat(pred.size(0), 1, 1)
pred = (g * pred).scatter_add(2, xids, (1 - g) * dists)
else:
pred = T.softmax(self.proj(h), dim=-1)
dists = None
return pred, dists
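# Rough shape sketch, inferred from the tensor ops above (not stated by the
# original author): h, y_emb, memory and pred appear to be sequence-first, i.e.
# h: (tgt_len, batch, hidden), memory: (src_len, batch, hidden),
# xids: (src_len, batch) and pred: (tgt_len, batch, dict_size [+ max_ext_len]).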
|
try:
import tensorflow as tf
except ImportError:
raise ImportError("reinforcement requires tensorflow 1.14")
from reinforcement.models.neural_network import NeuralNetwork, InvalidOperationError, NotCompiledError
class AnnBuilder:
def __init__(self, input_size, seed):
self.ann = NeuralNetwork(input_size, seed)
self.loss = 'mean_squared_error'
self.optimizer = 'gradient_descent'
self.lr = 0.1
def add_layer(self, units, activation, weight_init, bias_init='zeros'):
self.ann.add_layer(units, activation, weight_init, bias_init)
return self
def compile(self, loss, optimizer, lr):
self.loss = loss
self.optimizer = optimizer
self.lr = lr
return self
def finish(self):
self.ann.compile(self.loss, self.optimizer, self.lr)
return self.ann
def make_ann(input_size, seed=None):
return AnnBuilder(input_size, seed)
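# Typical use of the builder (mirrors the tests below): configure layers, then
# compile and finish to obtain the underlying NeuralNetwork, e.g.
#   ann = make_ann(input_size=1).add_layer(1, 'linear', 'zeros') \
#       .compile('mean_squared_error', 'gradient_descent', lr=0.5).finish()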
class NeuralNetworkTest(tf.test.TestCase):
def setUp(self):
self.original_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(3)
tf.set_random_seed(42)
tf.reset_default_graph()
def tearDown(self):
tf.logging.set_verbosity(self.original_v)
tf.set_random_seed(None)
def test_zero_layers(self):
with self.assertRaises(InvalidOperationError) as e_info, self.test_session():
make_ann(input_size=1).finish()
self.assertEqual(str(e_info.exception), "A Neural Network needs at least one layer to be compiled")
def test_simple_two_neuron_network(self):
with self.test_session():
ann = make_ann(input_size=1).add_layer(1, 'linear', 'ones').finish()
self.assertAllEqual(ann.predict([[3]]), [[3]])
self.assertAllEqual(ann.predict([[-1]]), [[-1]])
def test_relu_activation(self):
with self.test_session():
ann = make_ann(input_size=1).add_layer(1, 'relu', 'ones').finish()
self.assertAllEqual(ann.predict([[3]]), [[3]])
self.assertAllEqual(ann.predict([[-1]]), [[0]])
def test_missing_activation_function(self):
with self.assertRaises(NotImplementedError) as e_info, self.test_session():
make_ann(input_size=1).add_layer(1, 'MISSING', 'ones').finish()
self.assertEqual(str(e_info.exception), "The specified activation function 'MISSING' has not been implemented")
def test_weighted_inputs(self):
with self.test_session():
ann = make_ann(input_size=2).add_layer(1, 'linear', 'ones').finish()
self.assertAllEqual(ann.predict([[3, 2]]), [[5]])
def test_zero_initialization(self):
with self.test_session():
ann = make_ann(input_size=2).add_layer(1, 'linear', 'zeros').finish()
self.assertAllEqual(ann.predict([[3, 2]]), [[0]])
def test_missing_initialization(self):
with self.assertRaises(NotImplementedError) as e_info, self.test_session():
make_ann(input_size=2).add_layer(1, 'linear', 'MISSING').finish()
self.assertEqual(str(e_info.exception), "The specified initialization 'MISSING' has not been implemented")
def test_glorot_uniform_initialization(self):
with self.test_session():
ann = make_ann(input_size=1, seed=7).add_layer(1, 'linear', 'glorot_uniform').finish()
self.assertAllClose(ann.predict([[3]]), [[-0.16689062]])
def test_bias_initialization(self):
with self.test_session():
ann = make_ann(input_size=1).add_layer(1, 'linear', 'zeros', bias_init='ones').finish()
self.assertAllEqual(ann.predict([[3]]), [[1]])
def test_glorot_uniform_bias_initialization(self):
with self.test_session():
ann = make_ann(input_size=1, seed=7).add_layer(1, 'linear', 'zeros', bias_init='glorot_uniform').finish()
self.assertAllClose(ann.predict([[3]]), [[-0.05563021]])
def test_missing_bias_initialization(self):
with self.assertRaises(NotImplementedError) as e_info, self.test_session():
make_ann(input_size=2).add_layer(1, 'linear', 'zeros', bias_init='MISSING').finish()
self.assertEqual(str(e_info.exception), "The specified initialization 'MISSING' has not been implemented")
def test_training_uncompiled_network(self):
with self.assertRaises(NotCompiledError) as e_info, self.test_session():
ann = NeuralNetwork(input_size=2)
ann.add_layer(1, 'linear', 'zeros')
ann.train([[3, 1], [1, 2]], [[-1], [2]])
self.assertEqual(str(e_info.exception), "The network needs to be compiled before it can be trained")
def test_mean_squared_error_gradient_descent_training(self):
with self.test_session():
ann = make_ann(input_size=1).add_layer(1, 'linear', 'zeros') \
.compile('mean_squared_error', 'gradient_descent', lr=0.5).finish()
ann.train([[0]], [[10]])
self.assertAllClose(ann.predict([[0]]), [[10]])
def test_different_learning_rate(self):
with self.test_session():
ann = make_ann(input_size=1).add_layer(1, 'linear', 'zeros') \
.compile('mean_squared_error', 'gradient_descent', lr=0.1).finish()
ann.train([[0]], [[10]])
self.assertAllClose(ann.predict([[0]]), [[2]])
def test_missing_loss_function(self):
with self.assertRaises(NotImplementedError) as e_info, self.test_session():
make_ann(input_size=1).add_layer(1, 'linear', 'zeros') \
.compile('MISSING', 'gradient_descent', lr=0.1).finish()
self.assertEqual(str(e_info.exception), "The specified loss function 'MISSING' has not been implemented")
def test_missing_optimizer(self):
with self.assertRaises(NotImplementedError) as e_info, self.test_session():
make_ann(input_size=1).add_layer(1, 'linear', 'zeros') \
.compile('mean_squared_error', 'MISSING', lr=0.1).finish()
self.assertEqual(str(e_info.exception), "The specified optimizer 'MISSING' has not been implemented")
def test_adding_layer_after_compilation(self):
with self.assertRaises(InvalidOperationError) as e_info, self.test_session():
ann = NeuralNetwork(input_size=1)
ann.add_layer(1, 'linear', 'zeros')
ann.compile('mean_squared_error', 'gradient_descent', 0.1)
ann.add_layer(1, 'linear', 'zeros')
self.assertEqual(str(e_info.exception), "Adding layers after compiling a network is not supported")
def test_deep_neural_network_prediction(self):
with self.test_session():
ann = make_ann(input_size=2).add_layer(2, 'relu', 'ones').add_layer(1, 'linear', 'ones').finish()
self.assertAllEqual(ann.predict([[3, 2]]), [[10]])
self.assertAllEqual(ann.predict([[3, -1]]), [[4]])
def test_deep_neural_network_training(self):
with self.test_session():
ann = make_ann(input_size=1).add_layer(1, 'linear', 'zeros').add_layer(1, 'linear', 'zeros') \
.compile('mean_squared_error', 'gradient_descent', lr=0.5).finish()
ann.train([[0]], [[10]])
self.assertAllClose(ann.predict([[0]]), [[10]])
if __name__ == '__main__':
tf.test.main()
|
# creation of the text story behind
storyBehind = '''
Guido van Rossum, the creator of the Python language, named the language
after the BBC show "Monty Python’s Flying Circus". He doesn’t particularly like
snakes that kill animals for food by winding their long bodies around them and
crushing them.
'''
know = True
__author__ = ["Guido van Rossum","Guido", "Rossum", "Guido v Rossum", "Guido v. Rossum"]
__media__ = "BBC"
__show__ = "Monty Python's Flying Circus"
__likes__ = "snakes"
def doAgain():
print("It is not true.\nYou must do it again.")
print('This is the story behind Python:\n{0}'.format(storyBehind))
# Our while loop for the quiz of 4 questions
while know:
author = input("Who is the inventor of Python? ")
if author.lower() == __author__[0].lower():
print("That's true")
show = input("What is the title of the show he likes? ")
if show.lower() == __show__.lower():
print("That's also true")
media = input("What is the name of the media he sees it? ")
if media.lower() == __media__.lower():
print("Great :-)!!!\nNow we enter to the next question:")
likes = input("What is the animal he doesn't like? ")
if likes.lower() == __likes__.lower():
print("Oh!! You are so so great!!\nThank you ;-)\nThat's all for today.")
break
else:
doAgain()
else:
doAgain()
else:
doAgain()
else:
doAgain() |
#! python3
# pw.py - A password locker program that is NOT SECURE!!!
import sys
import pyperclip
import os
if len(sys.argv) < 2:
print('Usage: python pw.py [account] - copy account password')
sys.exit()
passwords = {'email': 'teste@teste'}
account = sys.argv[1] # the first command-line argument is the account name
if sys.argv[1] in passwords:
pyperclip.copy(passwords[sys.argv[1]])
print()
print(f'\nPassword for {sys.argv[1]} copied to the clipboard')
else:
print(f'\nNo saved password found for {sys.argv[1]}')
os.system('PAUSE')
# TODO Add a command to insert new accounts
# TODO Encrypt the saved passwords and accounts
# TODO Save the data in a shelve file
# TODO Remove all passwords from the main program and use an encrypted JSON file
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
app = Flask(__name__)
appdir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(appdir, 'db.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
from src import routes |
import asyncio
from unittest import mock
import pytest
from redical import create_pool, PoolClosedError, PoolClosingError, WatchError
pytestmark = [pytest.mark.asyncio]
@pytest.fixture
async def pool(redis_uri):
pool = await create_pool(redis_uri, max_size=4, min_size=2)
await pool.execute('flushdb')
yield pool
if not pool.is_closed and pool.is_closing:
await pool.wait_closed()
return
if not pool.is_closed:
pool.close()
await pool.wait_closed()
async def test_min_lower_than_max(redis_uri):
with pytest.raises(ValueError, match="'min_size' must be lower than 'max_size'"):
await create_pool(redis_uri, min_size=10, max_size=1)
async def test_db_less_than_zero(redis_uri):
with pytest.raises(ValueError, match="'db' must be a non-negative number"):
await create_pool(redis_uri, db=-1)
async def test_max_chunk_size_less_than_zero(redis_uri):
with pytest.raises(ValueError, match="'max_chunk_size' must be a number greater than zero"):
await create_pool(redis_uri, max_chunk_size=-1)
async def test_min_pool_filled(pool):
assert 2 == pool.available
assert 2 == pool.size
async def test_pool_double_close(pool):
pool.close()
with pytest.raises(PoolClosingError, match='Pool is already closing'):
pool.close()
async def test_pool_already_closed(pool):
pool.close()
await pool.wait_closed()
with pytest.raises(PoolClosedError, match='Pool is already closed'):
pool.close()
async def test_wait_not_closed(pool):
with pytest.raises(RuntimeError, match='Pool is not closing'):
await pool.wait_closed()
async def test_double_wait_closed(pool):
pool.close()
await pool.wait_closed()
with pytest.raises(RuntimeError, match='Pool is not closing'):
await pool.wait_closed()
# |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
# Internal connection state
async def test_acquiring_connection_rotates_pool(pool):
# make it look like the first connection in the pool is being used
# so that we *should* pick the second one to execute our command
# the connection needs to stay in the pool so that we know we're rotating past it.
# if not we could just enter a pipeline and in the same block execute a random command
# get rid of one of the connections
conn = pool._pool.popleft()
conn.close()
await conn.wait_closed()
# replace with a mock connection
conn = mock.Mock(in_use=True, is_closed=False, is_closing=False, execute=mock.AsyncMock(return_value='PONG'))
pool._pool.appendleft(conn)
assert 'PONG' == await pool.execute('ping')
# remove the mock from the internal pool so cleanup doesn't complain
pool._pool.remove(conn)
conn.execute.assert_not_called()
async def test_release_drop_closed_connection(pool):
conn = await pool._acquire_unused_connection(remove_from_pool=True)
assert 2 == pool.size
conn.close()
await conn.wait_closed()
await pool._release_connection(conn)
assert 1 == pool.size
assert conn not in pool._pool
async def test_release_drop_closing_connection(pool):
conn = await pool._acquire_unused_connection(remove_from_pool=True)
assert 2 == pool.size
conn.close()
await pool._release_connection(conn)
await conn.wait_closed()
assert 1 == pool.size
assert conn not in pool._pool
async def test_acquire_create_new(pool):
conn = await pool._acquire_unused_connection(remove_from_pool=True)
pool._in_use.add(conn)
conn = await pool._acquire_unused_connection(remove_from_pool=True)
pool._in_use.add(conn)
conn = await pool._acquire_unused_connection()
assert 3 == pool.size
assert 1 == pool.available
async def test_acquire_waits_if_no_available_connection(pool):
conns = []
for x in range(4):
conn = await pool._acquire_unused_connection(remove_from_pool=True)
pool._in_use.add(conn)
conns.append(conn)
assert 4 == pool.size
# there are now no available connections in the pool and it is at its maximum size
event = asyncio.Event()
async def acquire(event):
loop = asyncio.get_running_loop()
loop.call_soon(event.set)
conn = await pool._acquire_unused_connection()
assert conn in conns
async def release(event):
await event.wait()
await pool._release_connection(conns[-1])
await asyncio.wait_for(asyncio.gather(release(event), acquire(event)), timeout=1)
async def test_acquire_prune_stale_connections(pool):
assert 2 == pool.available
for conn in pool._pool:
conn.close()
await conn.wait_closed()
assert all([conn.is_closed for conn in pool._pool])
await pool.execute('set', 'foo', 'bar')
assert 1 == pool.size
# |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
# Execute
async def test_execute_basic(pool):
await pool.execute('set', 'foo', 'bar')
assert 'bar' == await pool.execute('get', 'foo')
async def test_execute_no_free_connections(pool):
conns = []
for x in range(4):
conns.append(await pool._acquire_unused_connection(remove_from_pool=True))
pool._in_use.add(conns[-1])
assert pool.size == pool.max_size
event = asyncio.Event()
async def execute(event):
loop = asyncio.get_event_loop()
loop.call_soon(event.set)
assert True is await pool.execute('set', 'foo', 'bar')
async def release(event):
await event.wait()
await pool._release_connection(conns[-1])
await asyncio.wait_for(asyncio.gather(release(event), execute(event)), timeout=1)
async def test_execute_pool_closed(pool):
pool.close()
await pool.wait_closed()
with pytest.raises(PoolClosedError, match='Pool is closed'):
await pool.execute('get', 'foo')
async def test_execute_pool_closing(pool):
pool.close()
with pytest.raises(PoolClosingError, match='Pool is closing'):
await asyncio.wait_for(pool.execute('get', 'foo'), timeout=1)
# |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
# Pipelines
async def test_pipeline(pool):
async with pool as conn:
fut1 = conn.execute('set', 'foo', 'bar')
fut2 = conn.execute('set', 'bar', 'baz')
fut3 = conn.execute('get', 'foo')
fut4 = conn.execute('get', 'bar')
assert True is await fut1
assert True is await fut2
assert 'bar' == await fut3
assert 'baz' == await fut4
async def test_pipelines_sequester_connection(pool):
async with pool:
assert 2 == pool.size
assert 1 == pool.available
async def test_context_sanity_check(pool):
async def t1(event):
async with pool as conn:
conn.execute('set', 'foo', 'bar')
fut = conn.execute('get', 'foo')
await event.wait()
assert 'bar' == await fut
async def t2(event):
async with pool as conn:
conn.execute('set', 'foo', 'baz')
fut = conn.execute('get', 'foo')
assert 0 == pool.available
assert 2 == pool.size
event.set()
assert 'baz' == await fut
event = asyncio.Event()
await asyncio.gather(t1(event), t2(event))
async def test_pipeline_releases_connection(pool):
async with pool:
assert 2 == pool.size
assert 1 == pool.available
assert 2 == pool.size
assert 2 == pool.available
async def test_pipeline_pool_closed(pool):
pool.close()
await pool.wait_closed()
with pytest.raises(PoolClosedError, match='Pool is closed'):
async with pool:
pass
async def test_pipeline_pool_closing(pool):
pool.close()
with pytest.raises(PoolClosingError, match='Pool is closing'):
async with pool:
pass
# |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
# Transactions
async def test_transaction_sequester_connection(pool):
async with pool.transaction():
assert 2 == pool.size
assert 1 == pool.available
async def test_transaction_releases_connection(pool):
async with pool.transaction():
assert 2 == pool.size
assert 1 == pool.available
assert 2 == pool.size
assert 2 == pool.available
async def test_transaction_release_with_error(pool):
with pytest.raises(ValueError):
async with pool.transaction():
raise ValueError('an error')
assert 2 == pool.size
assert 2 == pool.available
async def test_transaction_watch_error(pool):
await pool.execute('SET', 'mykey', 1)
async with pool.transaction('mykey') as t:
val = int(await t.execute('GET', 'mykey'))
val += 1
with pytest.raises(WatchError, match='Transaction aborted, WATCHed keys: mykey'):
async with t as pipe:
await pool.execute('SET', 'mykey', 'foo')
fut = pipe.execute('SET', 'mykey', val)
assert 'foo' == await pool.execute('GET', 'mykey')
with pytest.raises(WatchError, match='Transaction aborted, WATCHed keys: mykey'):
await fut
async def test_transaction_pool_closed(pool):
pool.close()
await pool.wait_closed()
with pytest.raises(PoolClosedError, match='Pool is closed'):
async with pool.transaction():
pass
async def test_transaction_pool_closing(pool):
pool.close()
with pytest.raises(PoolClosingError, match='Pool is closing'):
async with pool.transaction():
pass
# |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
# Error responses
async def test_custom_error_response(pool):
def custom_error(exc):
return ValueError(str(exc).replace('ERR ', ''))
with pytest.raises(ValueError, match="wrong number of arguments for 'hset' command"):
await pool.execute('hset', 'mykey', error_func=custom_error)
|
"""Plugwise Anna Home Assistant component."""
import requests
import xml.etree.cElementTree as Etree
# Time related
import datetime
import pytz
from dateutil.parser import parse
# For XML corrections
import re
ANNA_PING_ENDPOINT = "/ping"
ANNA_DIRECT_OBJECTS_ENDPOINT = "/core/direct_objects"
ANNA_DOMAIN_OBJECTS_ENDPOINT = "/core/domain_objects"
ANNA_LOCATIONS_ENDPOINT = "/core/locations"
ANNA_APPLIANCES = "/core/appliances"
ANNA_RULES = "/core/rules"
class Haanna:
"""Define the Haanna object."""
def __init__(
self, username, password, host, port, legacy_anna=False,
):
"""Set the constructor for this class."""
self.legacy_anna = legacy_anna
self._username = username
self._password = password
self._endpoint = "http://" + host + ":" + str(port)
def ping_anna_thermostat(self):
"""Ping the thermostat to see if it's online."""
ping = requests.get(
self._endpoint + ANNA_PING_ENDPOINT,
auth=(self._username, self._password),
timeout=10,
)
if ping.status_code != 404:
raise ConnectionError("Could not connect to the gateway.")
return True
def get_direct_objects(self):
"""Collect the direct_objects XML-data."""
xml = requests.get(
self._endpoint + ANNA_DIRECT_OBJECTS_ENDPOINT,
auth=(self._username, self._password),
timeout=10,
)
if xml.status_code != requests.codes.ok: # pylint: disable=no-member
raise ConnectionError("Could not get the direct objects.")
return Etree.fromstring(self.escape_illegal_xml_characters(xml.text))
def get_domain_objects(self):
"""Collect the domain_objects XML-data."""
xml = requests.get(
self._endpoint + ANNA_DOMAIN_OBJECTS_ENDPOINT,
auth=(self._username, self._password),
timeout=10,
)
if xml.status_code != requests.codes.ok: # pylint: disable=no-member
raise ConnectionError("Could not get the domain objects.")
return Etree.fromstring(self.escape_illegal_xml_characters(xml.text))
@staticmethod
def escape_illegal_xml_characters(root):
"""Replace illegal &-characters."""
return re.sub(r"&([^a-zA-Z#])", r"&\1", root)
def get_presets(self, root):
"""Get the presets from the thermostat."""
if self.legacy_anna:
return self.__get_preset_dictionary_v1(root)
rule_id = self.get_rule_id_by_template_tag(
root, "zone_setpoint_and_state_based_on_preset",
)[0]
if rule_id is None:
rule_id = self.get_rule_id_by_name(root, "Thermostat presets")
if rule_id is None:
raise RuleIdNotFoundException("Could not find the rule id.")
presets = self.get_preset_dictionary(root, rule_id)
return presets
def get_schema_names(self, root):
"""Get schemas or schedules available."""
schemas = root.findall(".//rule")
result = []
for schema in schemas:
rule_name = schema.find("name").text
if rule_name:
if self.legacy_anna:
if "preset" not in rule_name:
result.append(rule_name)
else:
if "presets" not in rule_name:
result.append(rule_name)
if result == []:
return None
return result
def set_schema_state(self, root, schema, state):
"""Send a set request to the schema with the given name."""
schema_rule_id = self.get_rule_id_by_name(root, str(schema))
templates = root.findall(".//*[@id='{}']/template".format(schema_rule_id))
template_id = None
for rule in templates:
template_id = rule.attrib["id"]
uri = "{};id={}".format(ANNA_RULES, schema_rule_id)
state = str(state)
data = (
'<rules><rule id="{}"><name><![CDATA[{}]]></name>'
'<template id="{}" /><active>{}</active></rule>'
"</rules>".format(schema_rule_id, schema, template_id, state)
)
xml = requests.put(
self._endpoint + uri,
auth=(self._username, self._password),
data=data,
headers={"Content-Type": "text/xml"},
timeout=10,
)
if xml.status_code != requests.codes.ok: # pylint: disable=no-member
raise CouldNotSetTemperatureException(
"Could not set the schema to {}.".format(state) + xml.text
)
return "{} {}".format(xml.text, data)
def get_active_schema_name(self, root):
"""Get active schema."""
if self.legacy_anna:
schemas = root.findall(".//rule")
result = []
for schema in schemas:
rule_name = schema.find("name").text
if "preset" not in rule_name:
result.append(rule_name)
result = "".join(map(str, result))
if not result:
return None
return result
locator = "zone_preset_based_on_time_and_presence_with_override"
rule_id = self.get_rule_id_by_template_tag(root, locator)
if rule_id:
schema_active = self.get_active_name(root, rule_id)
return schema_active
def get_last_active_schema_name(self, root):
"""Determine the last active schema (not used for legacy Anna)."""
if not self.legacy_anna:
locator = "zone_preset_based_on_time_and_presence_with_override"
rule_id = self.get_rule_id_by_template_tag(root, locator)
last_schema_active = self.get_last_active_name(root, rule_id)
return last_schema_active
@staticmethod
def get_schema_state(root):
"""Get the mode the thermostat is in (active schedule is true or false)."""
log_type = "schedule_state"
locator = (
"appliance[type='thermostat']/logs/point_log[type='"
+ log_type
+ "']/period/measurement"
)
if root.find(locator) is not None:
return root.find(locator).text == "on"
return None
@staticmethod
def get_rule_id_by_template_tag(root, rule_name):
"""Get the rule ID based on template_tag."""
schema_ids = []
rules = root.findall("rule")
for rule in rules:
if rule.find("template").attrib["tag"] == rule_name:
schema_ids.append(rule.attrib["id"])
if schema_ids == []:
return None
return schema_ids
def set_preset(self, root, preset):
"""Set the given preset on the thermostat."""
if self.legacy_anna:
return self.__set_preset_v1(root, preset)
locator = "appliance[type='thermostat']/location"
location_id = root.find(locator).attrib["id"]
locations_root = Etree.fromstring(
requests.get(
self._endpoint + ANNA_LOCATIONS_ENDPOINT,
auth=(self._username, self._password),
timeout=10,
).text
)
current_location = locations_root.find("location[@id='" + location_id + "']")
location_name = current_location.find("name").text
location_type = current_location.find("type").text
xml = requests.put(
self._endpoint + ANNA_LOCATIONS_ENDPOINT + ";id=" + location_id,
auth=(self._username, self._password),
data="<locations>"
+ '<location id="'
+ location_id
+ '">'
+ "<name>"
+ location_name
+ "</name>"
+ "<type>"
+ location_type
+ "</type>"
+ "<preset>"
+ preset
+ "</preset>"
+ "</location>"
+ "</locations>",
headers={"Content-Type": "text/xml"},
timeout=10,
)
if xml.status_code != requests.codes.ok: # pylint: disable=no-member
raise CouldNotSetPresetException(
"Could not set the " "given preset: " + xml.text
)
return xml.text
def __set_preset_v1(self, root, preset):
"""Set the given preset on the thermostat for V1."""
locator = "rule/directives/when/then[@icon='" + preset + "'].../.../..."
rule = root.find(locator)
if rule is None:
raise CouldNotSetPresetException("Could not find preset '" + preset + "'")
rule_id = rule.attrib["id"]
xml = requests.put(
self._endpoint + ANNA_RULES,
auth=(self._username, self._password),
data="<rules>"
+ '<rule id="'
+ rule_id
+ '">'
+ "<active>true</active>"
+ "</rule>"
+ "</rules>",
headers={"Content-Type": "text/xml"},
timeout=10,
)
if xml.status_code != requests.codes.ok: # pylint: disable=no-member
raise CouldNotSetPresetException(
"Could not set the given " "preset: " + xml.text
)
return xml.text
@staticmethod
def get_boiler_status(root):
"""Get the active boiler-heating status (On-Off control)."""
log_type = "boiler_state"
locator = (
"appliance[type='heater_central']/logs/point_log[type='"
+ log_type
+ "']/period/measurement"
)
if root.find(locator) is not None:
return root.find(locator).text == "on"
return None
@staticmethod
def get_heating_status(root):
"""Get the active heating status (OpenTherm control)."""
log_type = "central_heating_state"
locator = (
"appliance[type='heater_central']/logs/point_log[type='"
+ log_type
+ "']/period/measurement"
)
if root.find(locator) is not None:
return root.find(locator).text == "on"
return None
@staticmethod
def get_cooling_status(root):
"""Get the active cooling status."""
log_type = "cooling_state"
locator = (
"appliance[type='heater_central']/logs/point_log[type='"
+ log_type
+ "']/period/measurement"
)
if root.find(locator) is not None:
return root.find(locator).text == "on"
return None
def get_domestic_hot_water_status(self, root):
"""Get the domestic hot water status."""
if self.legacy_anna:
return None
log_type = "domestic_hot_water_state"
locator = (
"appliance[type='heater_central']/logs/point_log[type='"
+ log_type
+ "']/period/measurement"
)
if root.find(locator) is not None:
return root.find(locator).text == "on"
return None
def get_current_preset(self, root):
"""Get the current active preset."""
if self.legacy_anna:
active_rule = root.find("rule[active='true']/directives/when/then")
if active_rule is None or "icon" not in active_rule.keys():
return "none"
return active_rule.attrib["icon"]
log_type = "preset_state"
locator = (
"appliance[type='thermostat']/logs/point_log[type='"
+ log_type
+ "']/period/measurement"
)
return root.find(locator).text
def get_schedule_temperature(self, root):
"""Get the temperature setting from the selected schedule."""
point_log_id = self.get_point_log_id(root, "schedule_temperature")
if point_log_id:
measurement = self.get_measurement_from_point_log(root, point_log_id)
if measurement:
value = float(measurement)
return value
return None
def get_current_temperature(self, root):
"""Get the curent (room) temperature from the thermostat - match to HA name."""
current_temp_point_log_id = self.get_point_log_id(root, "temperature")
if current_temp_point_log_id:
measurement = self.get_measurement_from_point_log(
root, current_temp_point_log_id
)
value = float(measurement)
return value
return None
def get_target_temperature(self, root):
"""Get the target temperature from the thermostat."""
target_temp_log_id = self.get_point_log_id(root, "target_temperature")
if target_temp_log_id:
measurement = self.get_measurement_from_point_log(root, target_temp_log_id)
value = float(measurement)
return value
return None
def get_thermostat_temperature(self, root):
"""Get the target temperature from the thermostat."""
thermostat_log_id = self.get_point_log_id(root, "thermostat")
if thermostat_log_id:
measurement = self.get_measurement_from_point_log(root, thermostat_log_id)
value = float(measurement)
return value
return None
def get_outdoor_temperature(self, root):
"""Get the temperature from the thermostat."""
outdoor_temp_log_id = self.get_point_log_id(root, "outdoor_temperature")
if outdoor_temp_log_id:
measurement = self.get_measurement_from_point_log(root, outdoor_temp_log_id)
value = float(measurement)
value = '{:.1f}'.format(round(value, 1))
return value
return None
def get_illuminance(self, root):
"""Get the illuminance value from the thermostat."""
point_log_id = self.get_point_log_id(root, "illuminance")
if point_log_id:
measurement = self.get_measurement_from_point_log(root, point_log_id)
value = float(measurement)
value = '{:.1f}'.format(round(value, 1))
return value
return None
def get_boiler_temperature(self, root):
"""Get the boiler_temperature value from the thermostat."""
point_log_id = self.get_point_log_id(root, "boiler_temperature")
if point_log_id:
measurement = self.get_measurement_from_point_log(root, point_log_id)
value = float(measurement)
value = '{:.1f}'.format(round(value, 1))
return value
return None
def get_water_pressure(self, root):
"""Get the water pressure value from the thermostat."""
point_log_id = self.get_point_log_id(root, "central_heater_water_pressure")
if point_log_id:
measurement = self.get_measurement_from_point_log(root, point_log_id)
value = float(measurement)
value = '{:.1f}'.format(round(value, 1))
return value
return None
def __get_temperature_uri(self, root):
"""Determine the set_temperature uri for different versions of Anna."""
if self.legacy_anna:
locator = "appliance[type='thermostat']"
appliance_id = root.find(locator).attrib["id"]
return ANNA_APPLIANCES + ";id=" + appliance_id + "/thermostat"
locator = "appliance[type='thermostat']/location"
location_id = root.find(locator).attrib["id"]
locator = (
"location[@id='"
+ location_id
+ "']/actuator_functionalities/thermostat_functionality"
)
thermostat_functionality_id = root.find(locator).attrib["id"]
temperature_uri = (
ANNA_LOCATIONS_ENDPOINT
+ ";id="
+ location_id
+ "/thermostat;id="
+ thermostat_functionality_id
)
return temperature_uri
def set_temperature(self, root, temperature):
"""Send a set request to the temperature with the given temperature."""
uri = self.__get_temperature_uri(root)
temperature = str(temperature)
xml = requests.put(
self._endpoint + uri,
auth=(self._username, self._password),
data="<thermostat_functionality><setpoint>"
+ temperature
+ "</setpoint></thermostat_functionality>",
headers={"Content-Type": "text/xml"},
timeout=10,
)
if xml.status_code != requests.codes.ok: # pylint: disable=no-member
CouldNotSetTemperatureException("Could not set the temperature." + xml.text)
return xml.text
def get_anna_endpoint(self):
"""Get the ANNA Endpoint."""
return self._endpoint
@staticmethod
def get_point_log_id(root, log_type):
"""Get the point log ID based on log type."""
locator = (
"module/services/*[@log_type='" + log_type + "']/functionalities/point_log"
)
if root.find(locator) is not None:
return root.find(locator).attrib["id"]
return None
@staticmethod
def get_measurement_from_point_log(root, point_log_id):
"""Get the measurement from a point log based on point log ID."""
locator = "*/logs/point_log[@id='" + point_log_id + "']/period/measurement"
if root.find(locator) is not None:
return root.find(locator).text
return None
@staticmethod
def get_rule_id_by_name(root, rule_name):
"""Get the rule ID based on name."""
rules = root.findall("rule")
for rule in rules:
if rule.find("name").text == rule_name:
return rule.attrib["id"]
@staticmethod
def get_preset_dictionary(root, rule_id):
"""Get the presets from a rule based on rule ID and returns a dictionary with all the key-value pairs."""
preset_dictionary = {}
directives = root.find("rule[@id='" + rule_id + "']/directives")
for directive in directives:
preset = directive.find("then").attrib
keys, dummy = zip(*preset.items())
if str(keys[0]) == "setpoint":
preset_dictionary[directive.attrib["preset"]] = float(preset["setpoint"])
else:
preset_dictionary[directive.attrib["preset"]] = float(preset["heating_setpoint"])
return preset_dictionary
@staticmethod
def __get_preset_dictionary_v1(root):
"""
Get the presets and returns a dictionary with all the key-value pairs.
Example output: {'away': 17.0, 'home': 20.0, 'vacation': 15.0,
'no_frost': 10.0, 'asleep': 15.0}.
"""
preset_dictionary = {}
directives = root.findall("rule/directives/when/then")
for directive in directives:
if directive is not None and "icon" in directive.keys():
preset_dictionary[directive.attrib["icon"]] = float(
directive.attrib["temperature"]
)
return preset_dictionary
@staticmethod
def get_active_mode(root, schema_ids):
"""Get the mode from a (list of) rule id(s)."""
active = False
for schema_id in schema_ids:
if root.find("rule[@id='" + schema_id + "']/active").text == "true":
active = True
break
return active
@staticmethod
def get_active_name(root, schema_ids):
"""Get the active schema from a (list of) rule id(s)."""
active = None
for schema_id in schema_ids:
locator = root.find("rule[@id='" + schema_id + "']/active")
# Only one can be active
if locator.text == "true":
active = root.find("rule[@id='" + schema_id + "']/name").text
return active
@staticmethod
def get_last_active_name(root, schema_ids):
"""Get the last active schema from a (list of) rule id(s)."""
schemas = {}
epoch = datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)
date_format = "%Y-%m-%dT%H:%M:%S.%f%z"
for schema_id in schema_ids:
schema_name = root.find("rule[@id='" + schema_id + "']/name").text
schema_date = root.find("rule[@id='" + schema_id + "']/modified_date").text
schema_time = parse(schema_date)
schemas[schema_name] = (schema_time - epoch).total_seconds()
last_modified = sorted(schemas.items(), key=lambda kv: kv[1])[-1][0]
return last_modified
class AnnaException(Exception):
"""Define Exceptions."""
def __init__(self, arg1, arg2=None):
"""Set the base exception for interaction with the Anna gateway."""
self.arg1 = arg1
self.arg2 = arg2
super(AnnaException, self).__init__(arg1)
class RuleIdNotFoundException(AnnaException):
"""Raise an exception for when the rule id is not found in the direct objects."""
pass
class CouldNotSetPresetException(AnnaException):
"""Raise an exception for when the preset can not be set."""
pass
class CouldNotSetTemperatureException(AnnaException):
"""Raise an exception for when the temperature could not be set."""
pass
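

# --- Illustrative sketch (added; not part of the original module) ---
# A minimal, self-contained demonstration of the locator pattern used by
# __get_preset_dictionary_v1 above: presets live in rule/directives/when/then
# elements carrying 'icon' and 'temperature' attributes. The XML below is
# invented purely for this example.
if __name__ == "__main__":
    import xml.etree.ElementTree as _ET

    _sample = _ET.fromstring(
        "<rules><rule><directives><when>"
        "<then icon='home' temperature='20.0'/>"
        "<then icon='away' temperature='17.0'/>"
        "</when></directives></rule></rules>"
    )
    _presets = {
        then.attrib["icon"]: float(then.attrib["temperature"])
        for then in _sample.findall("rule/directives/when/then")
        if "icon" in then.attrib
    }
    print(_presets)  # expected: {'home': 20.0, 'away': 17.0}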
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from .context import tripp
from tripp import gradient
from tripp import algebra
from functools import partial
import random
import logging
logging.basicConfig(level=logging.INFO, format="%(lineno)d\t%(message)s")
def square(x):
"""for testing"""
return x * x
def derivative(x):
"""for testing"""
return 2 * x
class TestGradient(unittest.TestCase):
def setUp(self):
pass
def test_sum_of_squares(self):
"""gradient -- sum of squares"""
sut = [1, 2, 3, 4, 5]
result = gradient.sum_of_squares(sut)
self.assertEqual(result, 55)
def test_derivative_estimates(self):
"""gradient -- difference quotient"""
derivative_estimate = partial(gradient.difference_quotient,
square,
h=0.00001)
x = range(-10, 10)
actuals = map(derivative, x)
estimates = map(derivative_estimate, x)
for comparison in zip(actuals, estimates):
actual, estimate = comparison
self.assertEqual(actual, int(round(estimate, 1)))
def test_step(self):
"""gradient -- step"""
v = [random.randint(-10, 10) for i in range(3)]
tolerance = 0.0000001
while True:
_gradient = gradient.sum_of_squares_gradient(v)
next_v = gradient.step(v, _gradient, -0.01)
if algebra.distance(next_v, v) < tolerance:
break
v = next_v
expected = [0.0, 0.0, 0.0]
        returned = [round(x, 5) for x in v]
self.assertEqual(returned, expected)
def test_negate(self):
"""gradient -- negate"""
vals = [2, 4.5, 99, 0.000005]
funcs = [
lambda w: 7 * w + 5,
lambda x: x * 2,
lambda y: y ** 4,
lambda z: 2 * z - 7,
lambda w: 7 * w + 5]
for v, f in zip(vals, funcs):
result = f(v)
negation = gradient.negate(f)
self.assertEqual(result * -1, negation(v))
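

# --- Illustrative sketch (added; not part of the original tests) ---
# Shows the idea the derivative-estimate test verifies: a forward difference
# quotient (f(x + h) - f(x)) / h approximates the derivative of
# square(x) = x * x, which is 2 * x.
if __name__ == "__main__":
    def _difference_quotient(f, x, h):
        return (f(x + h) - f(x)) / h

    for _x in (-3.0, 0.0, 2.5):
        print(_x, _difference_quotient(square, _x, 1e-6), derivative(_x))
    unittest.main()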
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cobra.models.fields.foreignkey
class Migration(migrations.Migration):
dependencies = [
('accessgroup', '0001_initial'),
('team', '0001_initial'),
('project', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='accessgroup',
name='projects',
field=models.ManyToManyField(related_name='+', to='project.Project'),
preserve_default=True,
),
migrations.AddField(
model_name='accessgroup',
name='team',
field=cobra.models.fields.foreignkey.FlexibleForeignKey(related_name='+', to='team.Team'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='accessgroup',
unique_together=set([('team', 'name')]),
),
]
|
import patch
class Connection:
def __init__(self, app, connections):
pass
def iterate(self, connections):
pass
def no_connection(self, connections):
pass
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField,FileAllowed
from wtforms import StringField,TextAreaField,SubmitField,ValidationError
from wtforms.validators import Email,Required
from flask_login import current_user
from ..models import User
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit') |
import argparse
import sys
from typing import Optional
from typing import Sequence
import toml
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
args = parser.parse_args(argv)
retval = 0
for filename in args.filenames:
try:
with open(filename) as file_handler:
toml.load(file_handler)
except toml.TomlDecodeError as exc:
print('{}: {}'.format(filename, exc))
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main())
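
# Example usage (illustrative; the script name below is assumed, not taken
# from the original): run this check over one or more files. Each file that
# fails to parse is printed as "<filename>: <decode error>" and the exit
# status is 1.
#
#   python check_toml.py pyproject.toml broken.toml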
|
import cadquery as cq
core_rad = 45.0 / 2.0
# Objects to make the basic shape of the guide
guide_core = (cq.Workplane("XY")
.circle(core_rad)
.extrude(3.0))
guide_fan = (cq.Workplane("XY")
.moveTo(0.0, -10.0)
.hLine(core_rad + 15)
.threePointArc((0.0, core_rad + 15), (-core_rad - 15, -10.0))
.close()
.extrude(3.0))
# Fuse both objects so they act as one
guide = guide_core.union(guide_fan)
# Put guide holes in fan
guide = guide.faces(">Z").workplane(centerOption="ProjectedOrigin").polarArray(core_rad + 7.5, -10, 90, 6).circle(2.5).cutThruAll()
# Center shaft boss
#guide = guide.faces(">Z").workplane(centerOption="ProjectedOrigin").circle(10.0).extrude(7.0)
# Put the center hole in
guide = guide.faces(">Z").workplane(centerOption="ProjectedOrigin").circle(9.75 / 2.0).cutThruAll()
# Put the set screw holes in
guide = guide.faces("<Z").workplane(centerOption="ProjectedOrigin", invert=True).transformed(offset = (0, 0, 7)).transformed(rotate=(0, 90, 0)).circle(2.5 / 2.0).cutThruAll()
# Export to STL for printing
guide.val().exportStl("/home/jwright/Downloads/guide_1.stl", precision=0.0001)
show_object(guide) |
"""useful rexx functions
This module is intended to help with porting ATI code to python.
ATI had several rexx-like functions that will likely be in this
module as python functions. Other functions are included that may
be useful in parsing strings in a rexx-like manner.
Some of these functions have enhancements on top of the rexx
function. Enhancements include using a negative index/position or
length to indicate a position relative to the END of the string. See
individual functions for details.
USAGE
from rexx import *
Copyright 2021 IBM Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import re as _re
from . import __version__
__author__ = "Neil Johnson"
def copies(string, cnt):
"""returns cnt concatenated copies of string. The cnt must be a
positive whole number or zero."""
if cnt < 0:
raise ValueError("cnt="+repr(cnt)+" is not valid")
return str(string)*cnt
def delword(string, sw_or_n, len_or_si=None):
"""returns string after deleting the substring that starts at
the nth word and is of length blank-delimited words. If you omit
length, or if length is greater than the number of words from n
to the end of string, the function deletes the remaining words in
string (including the nth word). The length must be a positive
whole number or zero. The n must be a positive whole number. If n
is greater than the number of words in string, the function
returns string unchanged. The string deleted includes any blanks
following the final word involved but none of the blanks
preceding the first word involved."""
if isinstance(sw_or_n, str):
sword = sw_or_n
sidx = len_or_si
if sidx is None:
sidx = 1
pos1 = wordpos(string, sword, sidx)
if pos1 <= 0:
return string
cnt = words(string)
if cnt <= 0:
return string
return delword(string, pos1, cnt)
idx = sw_or_n
if idx <= 0:
raise ValueError("n must be greater than zero")
length = len_or_si
if length is None:
sidx = wordindex(string, idx)
if not sidx:
return string
return string[0:sidx-1]
sidx = wordindex(string, idx+length)
if not sidx:
return delword(string, idx)
return delword(string, idx)+string[sidx-2:].lstrip()
def index(haystack, needle, start=1):
"""returns the character position of one string, needle, in
another, haystack, or returns 0 if the string needle is not found
or is a null string. By default the search starts at the first
character of haystack (start has the value 1). You can override
this by specifying a different start point, which must be a
positive whole number.
"""
if start <= 0:
raise ValueError("start="+repr(start)+" is not valid")
return 1+haystack.find(needle, start-1)
def left(string, length, pad=" "):
"""returns a string of length length, containing the leftmost
length characters of string. The string returned is padded with
pad characters (or truncated) on the right as needed. The default
pad character is a blank. length must be a positive whole number
or zero.
"""
if length < 0:
raise ValueError("length="+repr(length)+" is not valid")
return substr(string, 1, length, pad=pad)
def length(string):
"""returns the length of string.
"""
return len(string)
def pos(needle, haystack, start=1):
"""returns the character position of one string, needle, in
another, haystack, or returns 0 if the string needle is not found
or is a null string. By default the search starts at the first
character of haystack (start has the value 1). You can override
this by specifying a different start point, which must be a
positive whole number.
"""
if start <= 0:
raise ValueError("start="+repr(start)+" is not valid")
return 1+haystack.find(needle, start-1)
def right(string, length, pad=" "):
"""returns a string of length length containing the rightmost
length characters of string. The string returned is padded with
pad characters (or truncated) on the left as needed. The default
pad character is a blank. The length must be a positive whole
number or zero."""
if length < 0:
raise ValueError("length="+repr(length)+" is not valid")
return substr(string, -1, -length, pad=pad)
def space(string, cnt=1, pad=" "):
"""returns the blank-delimited words in string with cnt pad
characters between each word. If you specify cnt, it must be a
positive whole number or zero. If it is 0, all blanks are
removed. Leading and trailing blanks are always removed. The
default for cnt is 1, and the default pad character is a blank.
"""
if cnt < 0:
raise ValueError("cnt="+repr(cnt)+" is not valid")
return (pad * cnt).join(string.split())
def strip(string, option="B", char=" "):
"""returns string with leading or trailing characters or both
removed, based on the option you specify. The following are valid
options. (Only the capitalized letter is needed; all characters
following it are ignored.)
Both
removes both leading and trailing characters from string.
This is the default.
Leading
removes leading characters from string.
Trailing
removes trailing characters from string.
The third argument, char, specifies the character to be removed,
and the default is a blank. With rexx, if you specify char, it
must be exactly one character long. With this function, all
characters specified in the char string are considered for
removal from string.
Here are some examples:
strip(' ab c ') -> 'ab c'
strip(' ab c ','L') -> 'ab c '
strip(' ab c ','t') -> ' ab c'
strip('12.7000',char='0') -> '12.7'
strip('0012.700',char='0') -> '12.7'
"""
if option[0] == "B" or option[0] == "b":
return string.strip(char)
elif option[0] == "L" or option[0] == "l":
return string.lstrip(char)
elif option[0] == "T" or option[0] == "t":
return string.rstrip(char)
else:
raise ValueError("option="+repr(option)+" is not valid")
def substr(string, idx, length=None, pad=" "):
"""returns the substring of string that begins at the idx'th
character and is of length length, padded with pad if necessary.
In rexx, idx must be a positive whole number. In this function, idx
    can also be negative. When idx is negative, the beginning of the
substring is relative to the end of string like in python. For
    example, -1 refers to the last character in string and -2
refers to the second to last character in string and so on.
In this function, length can be negative. A negative length
means that idx refers to the last character in the substring
instead of the first. The length of the returned substring is
always abs(length).
If you omit length, the rest of the string is returned. The
default pad character is a blank.
Here are some examples:
substr('abc',2) -> 'bc'
substr('abc',2,4) -> 'bc '
substr('abc',2,6,'.') -> 'bc....'
See also the 'left' and 'right' functions.
"""
if not idx:
raise ValueError(f"n={idx} is not valid")
if length == 0:
return ""
if length is None:
if idx < 0:
return string[:idx]
return string[idx-1:]
if idx > 0 and length >= 0:
string = string[idx-1:idx-1+length]
elif idx > 0:
if idx + length >= 0:
string = string[idx+length:idx]
else:
string = string[:idx]
elif length >= 0:
if idx + length < 0:
string = string[idx:idx+length]
else:
string = string[idx:]
elif idx < -1:
string = string[idx+length+1:idx+1]
else:
string = string[idx+length+1:]
padding = copies(pad, abs(length) - len(string))
if length >= 0:
return string + padding
return padding + string
def subword(string, wpos, length=None):
"""returns the substring of string that starts at the nth word,
and is up to length blank-delimited words. The n must be a
positive whole number. If you omit length, it defaults to the
number of remaining words in string. The returned string never
has leading or trailing blanks, but includes all blanks between
the selected words.
"""
if wpos <= 0:
raise ValueError("n must be a positive whole number")
if length is None:
cpos = wordindex(string, wpos)
if not cpos:
return ""
return string[cpos-1:].strip()
if length < 0:
raise ValueError("length cannot be negative")
if not length:
return ""
cpos = wordindex(string, wpos+length)
if not cpos:
return subword(string, wpos)
return subword(string[:cpos-2], wpos)
def word(string, wpos):
"""returns the nth blank-delimited word in string or returns the
null string if fewer than n words are in string. The n must be a
positive whole number. This function is exactly equivalent to
subword(string,n,1).
"""
return subword(string, wpos, 1)
def wordindex(string, wpos):
"""returns the position of the first character in the nth
blank-delimited word in string or returns 0 if fewer than n words
are in string. The n must be a positive whole number.
"""
if wpos <= 0:
raise ValueError("n must be a positive whole number")
i = 0
for mat in _re.finditer(r"\S+", string):
i += 1
if i == wpos:
return 1 + mat.start()
return 0
def wordpos(phrase, string, start=1):
"""returns the word number of the first word of phrase found in
string or returns 0 if phrase contains no words or if phrase is
not found. Multiple blanks between words in either phrase or
string are treated as a single blank for the comparison, but
otherwise the words must match exactly.
By default the search starts at the first word in string. You can
override this by specifying start (which must be positive), the
word at which to start the search."""
ws2 = " "+space(subword(string, start))+" "
i = ws2.find(" "+space(phrase)+" ")
if i < 0:
return 0
return start+words(ws2[0:i])
def words(sentence):
"""returns the number of blank-delimited words in string.
"""
return sum(1 for _ in _re.finditer(r"\S+", sentence))
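

# --- Illustrative self-check (added; not part of the original module) ---
# A few quick examples of the rexx-like helpers defined above; the expected
# values follow directly from the docstrings. The block runs only when the
# module is executed as a script (e.g. via python -m).
if __name__ == "__main__":
    assert copies("ab", 3) == "ababab"
    assert words("  one  two three ") == 3
    assert word("one two three", 2) == "two"
    assert wordindex("one two three", 2) == 5
    assert pos("be", "to be or not to be") == 4
    assert left("abc", 5, ".") == "abc.."
    assert right("abc", 5, ".") == "..abc"
    assert strip("0012.700", char="0") == "12.7"
    assert substr("abc", 2, 6, ".") == "bc...."
    print("rexx helper examples OK")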
|
# This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
import logging
from typing import List
from volatility.framework import renderers, exceptions, interfaces
from volatility.framework.configuration import requirements
from volatility.framework.interfaces import plugins
from volatility.framework.layers import intel
vollog = logging.getLogger(__name__)
class Statistics(plugins.PluginInterface):
@classmethod
def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
return [
requirements.TranslationLayerRequirement(name = 'primary',
description = 'Memory layer for the kernel',
architectures = ["Intel32", "Intel64"])
]
def _generator(self):
# Do mass mapping and determine the number of different layers and how many pages go to each one
layer = self.context.layers[self.config['primary']]
page_count = swap_count = invalid_page_count = large_page_count = large_swap_count = large_invalid_count = other_invalid = 0
if isinstance(layer, intel.Intel):
page_addr = 0
expected_page_size = 1 << layer.bits_per_register
while page_addr < layer.maximum_address:
try:
_, _, _, page_size, layer_name = list(layer.mapping(page_addr, 2 * expected_page_size))[0]
if layer_name != layer.config['memory_layer']:
swap_count += 1
else:
page_count += 1
if page_size > expected_page_size:
large_page_count += 1
except exceptions.SwappedInvalidAddressException as excp:
swap_count += 1
page_size = (1 << excp.invalid_bits)
if page_size != expected_page_size:
large_swap_count += 1
except exceptions.PagedInvalidAddressException as excp:
invalid_page_count += 1
page_size = (1 << excp.invalid_bits)
if page_size != expected_page_size:
large_invalid_count += 1
except exceptions.InvalidAddressException as excp:
other_invalid += 1
page_size = expected_page_size
vollog.debug("A non-page lookup invalid address exception occurred at: {} in layer {}".format(
hex(excp.invalid_address), excp.layer_name))
page_addr += page_size
self._progress_callback((page_addr * 100) / layer.maximum_address, "Reading memory")
yield (0, (page_count, large_page_count, swap_count, large_swap_count, invalid_page_count, large_invalid_count,
other_invalid))
def run(self):
return renderers.TreeGrid([("Valid pages (all)", int), ("Valid pages (large)", int),
("Swapped Pages (all)", int), ("Swapped Pages (large)", int),
("Invalid Pages (all)", int), ("Invalid Pages (large)", int),
("Other Invalid Pages (all)", int)], self._generator())
|
import requests
import uuid
import json
from datetime import datetime
from django.utils.dateparse import parse_duration
from django.utils.text import slugify
from django.urls import reverse
from django.contrib.auth.models import User
from django.conf import settings
from django.test import TestCase
from urllib.parse import urlparse, parse_qs
from apps.mymedicare_cb.views import generate_nonce
from apps.mymedicare_cb.models import AnonUserState
from apps.mymedicare_cb.authorization import OAuth2Config
from apps.capabilities.models import ProtectedCapability
from httmock import urlmatch, all_requests, HTTMock
from django.contrib.auth.models import Group
from apps.fhir.server.models import ResourceRouter
from apps.dot_ext.models import Approval, Application
from .responses import patient_response
class MyMedicareBlueButtonClientApiUserInfoTest(TestCase):
"""
Tests for the MyMedicare login and SLS Callback
"""
def setUp(self):
self.callback_url = reverse('mymedicare-sls-callback')
self.login_url = reverse('mymedicare-login')
Group.objects.create(name='BlueButton')
ResourceRouter.objects.create(pk=settings.FHIR_SERVER_DEFAULT, fhir_url="http://bogus.com/")
def test_login_url_success(self):
"""
Test well-formed login_url has expected content
"""
fake_login_url = 'https://example.com/login?scope=openid'
with self.settings(ALLOW_CHOOSE_LOGIN=False, MEDICARE_LOGIN_URI=fake_login_url, MEDICARE_REDIRECT_URI='/123'):
response = self.client.get(self.login_url + '?next=/')
self.assertEqual(response.status_code, 302)
query = parse_qs(urlparse(response['Location']).query)
path = response['Location'].split('?')[0]
self.assertEqual(path, 'https://example.com/login')
self.assertEqual(query['redirect_uri'][0], '/123')
def test_callback_url_missing_state(self):
"""
Test callback_url returns HTTP 400 when
necessary GET parameter state is missing.
"""
response = self.client.get(self.callback_url)
self.assertEqual(response.status_code, 400)
def test_authorize_uuid_dne(self):
auth_uri = reverse(
'dot_ext:authorize-instance',
args=[uuid.uuid4()])
response = self.client.get(auth_uri)
self.assertEqual(302, response.status_code)
def _create_capability(self, name, urls, group=None, default=True):
"""
Helper method that creates a ProtectedCapability instance
that controls the access for the set of `urls`.
"""
group = group or self._create_group('test')
capability = ProtectedCapability.objects.create(
default=default,
title=name,
slug=slugify(name),
protected_resources=json.dumps(urls),
group=group)
return capability
def _create_group(self, name):
"""
Helper method that creates a group instance
with `name`.
"""
group, _ = Group.objects.get_or_create(name=name)
return group
def test_authorize_uuid(self):
user = User.objects.create_user(
"bob",
password="bad")
application = Application.objects.create(
redirect_uris="http://test.com",
authorization_grant_type='authorization-code',
name="test01",
user=user)
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
application.scope.add(capability_a, capability_b)
approval = Approval.objects.create(
user=user)
auth_uri = reverse(
'dot_ext:authorize-instance',
args=[approval.uuid])
response = self.client.get(auth_uri, data={
"client_id": application.client_id,
"redirect_uri": "http://test.com",
"response_type": "code"})
self.assertEqual(200, response.status_code)
approval.refresh_from_db()
self.assertEqual(application, approval.application)
self.assertNotIn('_auth_user_id', self.client.session)
response = self.client.post(auth_uri, data={
"client_id": "bad",
"redirect_uri": "http://test.com",
"response_type": "code"})
self.assertEqual(302, response.status_code)
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': 'http://test.com',
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
}
response = self.client.post(auth_uri, data=payload)
self.assertEqual(302, response.status_code)
self.assertIn("code=", response.url)
approval.created_at = datetime.now() - parse_duration("601")
approval.save()
response = self.client.post(auth_uri, data={
"client_id": application.client_id,
"redirect_uri": "http://test.com",
"response_type": "code"})
self.assertEqual(302, response.status_code)
def test_callback_url_success(self):
# create a state
state = generate_nonce()
AnonUserState.objects.create(
state=state,
next_uri="http://www.google.com?client_id=test&redirect_uri=test.com&response_type=token&state=test")
# mock sls token endpoint
@urlmatch(netloc='dev.accounts.cms.gov', path='/v1/oauth/token')
def sls_token_mock(url, request):
return {
'status_code': 200,
'content': {'access_token': 'works'},
}
# mock sls user info endpoint
@urlmatch(netloc='dev.accounts.cms.gov', path='/v1/oauth/userinfo')
def sls_user_info_mock(url, request):
return {
'status_code': 200,
'content': {
'sub': '0123456789abcdefghijklmnopqrstuvwxyz',
'given_name': '',
'family_name': '',
'email': '[email protected]',
},
}
# mock fhir user info endpoint
@urlmatch(netloc='bogus.com', path='/Patient/')
def fhir_patient_info_mock(url, request):
return {
'status_code': 200,
'content': patient_response,
}
@all_requests
def catchall(url, request):
raise Exception(url)
with HTTMock(sls_token_mock,
sls_user_info_mock,
fhir_patient_info_mock,
catchall):
response = self.client.get(self.callback_url, data={'code': 'test', 'state': state})
# assert http redirect
self.assertEqual(response.status_code, 302)
self.assertIn("client_id=test", response.url)
self.assertIn("redirect_uri=test.com", response.url)
# self.assertRedirects(response, "http://www.google.com", fetch_redirect_response=False)
# assert login
self.assertNotIn('_auth_user_id', self.client.session)
def test_callback_url_failure(self):
# create a state
state = generate_nonce()
AnonUserState.objects.create(state=state, next_uri="http://www.google.com")
@all_requests
def catchall(url, request):
return {
'status_code': 403,
'content': {'error': 'nope'},
}
with HTTMock(catchall):
response = self.client.get(self.callback_url, data={'code': 'test', 'state': state})
# assert http redirect
self.assertEqual(response.status_code, 502)
def test_sls_token_exchange_w_creds(self):
with self.settings(SLS_CLIENT_ID="test",
SLS_CLIENT_SECRET="stest"):
sls_client = OAuth2Config()
@all_requests
def catchall(url, request):
sls_auth_header = request.headers['Authorization']
self.assertEqual(sls_auth_header, 'Basic dGVzdDpzdGVzdA==')
return {
'status_code': 200,
'content': {
'access_token': 'test_tkn',
},
}
with HTTMock(catchall):
tkn = sls_client.exchange("test_code")
self.assertEquals(tkn, "test_tkn")
def test_failed_sls_token_exchange(self):
with self.settings(SLS_CLIENT_ID="test",
SLS_CLIENT_SECRET="stest"):
sls_client = OAuth2Config()
@all_requests
def catchall(url, request):
sls_auth_header = request.headers['Authorization']
self.assertEqual(sls_auth_header, 'Basic dGVzdDpzdGVzdA==')
return {
'status_code': 401,
'content': {
'error': 'nope!',
},
}
with HTTMock(catchall):
with self.assertRaises(requests.exceptions.HTTPError):
tkn = sls_client.exchange("test_code")
self.assertEquals(tkn, "test_tkn")
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='torchdrift',
version='0.1.0.post1',
description="Drift Detection for PyTorch",
long_description=long_description,
long_description_content_type='text/markdown',
author='Orobix Srl and MathInf GmbH',
url='https://torchdrift.org/',
install_requires=['torch'],
packages=setuptools.find_packages(),
)
|
# -*- coding: utf-8 -*-
"""
CRUD-storage repositories for SQLAlchemy driver.
"""
from app.drivers.sqlalchemy.crud import Repository
from app.drivers.sqlalchemy.repos.name import NameRepository
from app.drivers.sqlalchemy.repos.role import RoleRepository
from app.drivers.sqlalchemy.repos.user import UserRepository
__all__ = [
'NameRepository',
'Repository',
'RoleRepository',
'UserRepository',
]
|
import numpy as np


def vars_recurse(obj):
"""
Recursively collect vars() of the object.
Parameters
----------
obj : object
Object to collect attributes
Returns
-------
params : dict
Dictionary of object parameters.
"""
if hasattr(obj, '__dict__'):
        params = dict(vars(obj))
for k in params.keys():
if hasattr(params[k], '__dict__'):
params[k] = vars_recurse(params[k])
params = dict(list(params.items()) + [('type', type(obj))])
return params
raise ValueError("obj does not have __dict__ attribute")
class Base:
"""
Base object class for nnrf.
"""
def __init__(self):
pass
def get_params(self):
"""
Get all parameters of the object, recursively.
Returns
-------
params : dict
Dictionary of object parameters.
"""
        params = dict(vars(self))
for k in params.keys():
if hasattr(params[k], 'get_params'):
                params[k] = dict(list(params[k].get_params().items()) + \
                                 [('type', type(params[k]))])
elif isinstance(params[k], np.random.RandomState):
params[k] = {'type': np.random.RandomState,
'seed': params[k].get_state()}
elif hasattr(params[k], '__dict__'):
params[k] = vars_recurse(params[k])
params = dict(list(params.items()) + [('type', type(self))])
return params
def set_params(self, params):
"""
Set the attributes of the object with the given
parameters.
Parameters
----------
params : dict
Dictionary of object parameters.
Returns
-------
self : Base
Itself, with parameters set.
"""
valid = self.get_params().keys()
for k, v in params.items():
if k not in valid:
raise ValueError("Invalid parameter %s for object %s" % \
(k, self.__name__))
param = v
if isinstance(v, dict) and 'type' in v.keys():
t = v['type']
if t == np.random.RandomState:
state = v['seed']
                    param = np.random.RandomState()
                    param.set_state(state)
                elif 'set_params' in dir(t):
                    v.pop('type')
                    param = t().set_params(v)
                else:
                    param = t()
                    v.pop('type')
                    for p, p_v in v.items():
                        setattr(param, p, p_v)
setattr(self, k, param)
return self
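

# --- Illustrative sketch (added; not part of the original module) ---
# A minimal example of how get_params()/set_params() round-trip attributes.
# The _Estimator class and its parameters are invented for demonstration only.
if __name__ == "__main__":
    class _Estimator(Base):
        def __init__(self, alpha=0.1, seed=None):
            super().__init__()
            self.alpha = alpha
            self.random_state = np.random.RandomState(seed)

    _a = _Estimator(alpha=0.5, seed=0)
    _b = _Estimator()
    _b.set_params(_a.get_params())
    print(_b.alpha)  # expected: 0.5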
|
from rpkflashtool.app import run
run()
|
"""Module output_file."""
__author__ = 'Joan A. Pinol (japinol)'
import logging
from life import constants as consts
# Errors
ERROR_OUT_FILE_OPEN = "!!! ERROR: Output file: %s. Program aborted !!!"
ERROR_OUT_FILE_WRITING = "!!! ERROR writing output file: %s. Some information has been lost!!!"
ERROR_OUT_FILE_MAX_TRIES = "!!! ERROR: Too many failed attempts writing to the output file: %s!!!"
# Max writing errors when trying to write the buffer to the output file
MAX_ERRORS_OUT_FILE = 4
logging.basicConfig(format=consts.LOGGER_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class OutputFile:
"""Manages information for output to file purposes."""
    def __init__(self, out_file):
        self._out_file = out_file
        self._str_out = ['']
        self._num_errors_out_file = 0
        self._write_data_to_file(open_method='w')
def write_buffer_before_exit(self):
"""Writes pending buffer to the output file.
        It is intended to be called before exiting the application.
"""
self._write_data_to_file()
def write_buffer(self):
self._write_data_to_file()
def write_line(self, line):
"""Writes a line to the output file."""
self._str_out.append(f'{line}\n')
def _write_data_to_file(self, open_method='a'):
"""Writes the data still in the buffer to the output file."""
if not self._str_out:
return
try:
with open(self._out_file, open_method, encoding='utf-8') as out_file:
for line in self._str_out:
out_file.write(line)
except Exception:
if open_method == 'w':
logger.critical(ERROR_OUT_FILE_OPEN % self._out_file)
exit()
else:
self._num_errors_out_file += 1
if self._num_errors_out_file >= MAX_ERRORS_OUT_FILE:
logger.critical(ERROR_OUT_FILE_MAX_TRIES % self._out_file)
exit()
return
self._str_out = []
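

# --- Illustrative usage (added; not part of the original module) ---
# Minimal example of buffering lines and flushing them to a file. The demo
# path is invented; OutputFile creates/truncates the file on construction.
if __name__ == "__main__":
    import os
    import tempfile

    _path = os.path.join(tempfile.gettempdir(), 'life_output_demo.txt')
    _out = OutputFile(_path)
    _out.write_line('generation 1: 42 cells alive')
    _out.write_buffer_before_exit()
    logger.info('wrote demo output to %s', _path)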
|
def flatten_mock_calls(mock):
"""
Flatten the calls performed on a particular mock object,
into a list of calls with arguments.
"""
result = []
for call in mock.mock_calls:
call = list(call)
call_name = call[0]
if '.' in str(call_name):
call_name = str(call_name).split('.')[-1]
result.append([call_name] + call[1:])
return result
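

# --- Illustrative example (added; not part of the original helper) ---
# Each recorded call is flattened to [name, args, kwargs], with dotted
# child-call names such as 'cursor().execute' reduced to 'execute'.
if __name__ == "__main__":
    from unittest import mock

    m = mock.MagicMock()
    m.connect(host="db")
    m.cursor().execute("SELECT 1")
    print(flatten_mock_calls(m))
    # expected:
    # [['connect', (), {'host': 'db'}], ['cursor', (), {}], ['execute', ('SELECT 1',), {}]]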
|
#!/usr/bin/env python
from __future__ import print_function
import collections
import json
import sys
import ipaddress
clients = dict()
def agg_ip(ip):
mask = 48 if ":" in ip else 16
addr = "%s/%d" % (ip, mask)
net = ipaddress.ip_network(unicode(addr), strict=False)
return str(net)
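
# Illustrative examples (added): an IPv4 address is aggregated to its /16
# network and an IPv6 address to its /48, e.g.
#   agg_ip("192.0.2.55")  -> "192.0.0.0/16"
#   agg_ip("2001:db8::1") -> "2001:db8::/48"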
for arg in sys.argv[1:]:
with open(arg) as f:
data = json.loads(f.read())
for ip, _histogram in data["clients"].items():
client = agg_ip(ip)
__histogram = dict((int(k), int(v)) for (k, v) in _histogram.items())
histogram = collections.Counter(__histogram)
clients.setdefault(client, collections.Counter())
clients[client] += histogram
print(json.dumps(clients))
|
# https://blog.csdn.net/rizero/article/details/104244454
# Stock trading-volume prediction (a basic PyTorch exercise)
## depends
import pandas as pd
import torch
import torch.nn
import torch.optim
from debug import ptf_tensor
## raw data
url = 'C:/Users/HUAWEI/Desktop/深度学习/Blog附带代码/FB.csv'
df = pd.read_csv(url, index_col=0) # read the full dataset
index_col = ['col_1','col_2'] # read only the specified columns
error_bad_lines = False # skip malformed rows instead of raising; useful for dirty data
na_values = 'NULL' # treat NULL as a missing value
## data clean
# dataset preparation
'''
The data is ordered newest-first: more recent dates appear earlier in the index.
'''
train_start, train_end=sum(df.index>='2017'),sum(df.index>='2013')
test_start, test_end=sum(df.index>='2018'),sum(df.index>='2017')
n_total_train = train_end -train_start
n_total_test = test_end -test_start
s_mean=df[train_start:train_end].mean() # mean of the training slice, for normalization
s_std=df[train_start:train_end].std() # standard deviation of the training slice, for normalization
n_features=5 # five input features
# select columns 0-4, i.e. Open, High, Low, Close, Volume, and normalize them
df_feature=((df-s_mean)/s_std).iloc[:,:n_features]
s_labels=(df['Volume']<df['Volume'].shift(1)).astype(int)
##.shift(1) shifts the data down by one row
# usage reference: https://www.zhihu.com/question/264963268
# labeling rule: label = 1 if the next day's volume exceeds the current day's, otherwise 0
## alter format
x=torch.tensor(df_feature.values,dtype=torch.float32) # size: [m,5]
ptf_tensor(x,'x')
y=torch.tensor(s_labels.values.reshape(-1,1),dtype=torch.float32) # size [m,1]
ptf_tensor(y,'y')
## build nn
fc=torch.nn.Linear(n_features,1)
weights,bias=fc.parameters()
criterion=torch.nn.BCEWithLogitsLoss()
optimizer=torch.optim.Adam(fc.parameters())
## train + check
n_steps=20001 # number of iterations
for step in range(n_steps):
if step:
        optimizer.zero_grad() # reset gradients; otherwise they accumulate
        loss.backward() # compute parameter gradients
        optimizer.step() # update the parameters from the gradients
    pred=fc(x) # compute predictions
    loss=criterion(pred[train_start:train_end],y[train_start:train_end]) # training loss on the training slice
if step % 500==0:
        #print('#{}, loss = {:g}'.format(step, loss))
output = (pred > 0)
correct = (output == y.bool())
        n_correct_train = correct[train_start:train_end].sum().item() # number of correct training predictions
        n_correct_test = correct[test_start:test_end].sum().item() # number of correct test predictions
        accuracy_train = n_correct_train / n_total_train # training accuracy
accuracy_test = n_correct_test / n_total_test
        print('train accuracy = {}, test accuracy = {}'.format(accuracy_train, accuracy_test))
##
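
## Illustrative addition (not part of the original blog code): after training,
## turn the logit of the newest sample into a probability with a sigmoid.
with torch.no_grad():
    prob_newest = torch.sigmoid(fc(x[:1]))
    print('predicted probability for the newest sample: {:g}'.format(prob_newest.item()))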
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"There is not a whole lot to test, but this does get us basic coverage."
import hashlib
import unittest
from ..common import Checksum
from ..repo_objects import RepoMetadata, Rpm
from ..tests import temp_repos as tr
class RepoObjectsTestCase(unittest.TestCase):
def test_checksum(self):
self.assertIs(
type(hashlib.new("sha1")),
type(Checksum(algorithm="sha", hexdigest=None).hasher()),
)
chk = Checksum(algorithm="sha256", hexdigest=None)
h = chk.hasher()
h.update(b"crouton")
chk = chk._replace(hexdigest=h.hexdigest())
self.assertEqual(chk, Checksum.from_string(str(chk)))
def test_rpm(self):
rpm = Rpm(
epoch=2,
name="foo-bar",
version=2,
release="rc0",
arch="aarch64",
build_timestamp=1337,
checksum=Checksum.from_string("algo:fabcab"),
canonical_checksum=None,
location="a/b.rpm",
size=14,
source_rpm="foo-bar-2-rc0.src.rpm",
)
self.assertEqual(rpm.nevra(), "foo-bar-2:2-rc0.aarch64")
self.assertEqual("algo:fabcab", str(rpm.best_checksum()))
self.assertEqual(
"zalgo:e1de41",
str(
rpm._replace(
canonical_checksum=Checksum(
algorithm="zalgo", hexdigest="e1de41"
)
).best_checksum()
),
)
def test_repodata_and_metadata(self):
with tr.temp_repos_steps(
gpg_signing_key=tr.get_test_signing_key(),
repo_change_steps=[
{
"whale": tr.Repo(
[tr.Rpm("x", "5", "6"), tr.Rpm("y", "3.4", "b")]
)
}
],
) as repos_dir, open(
repos_dir / "0/whale/repodata/repomd.xml", "rb"
) as infile:
rmd = RepoMetadata.new(xml=infile.read())
self.assertGreaterEqual(rmd.fetch_timestamp, rmd.build_timestamp)
# If this assert fires, you are changing the canonical hash,
# which is super-risky since it will break the existing DB. So,
# this test just exists to make sure you plan to migrate all the
# canonical hashes in the database.
self.assertEqual("sha384", rmd.checksum.algorithm)
self.assertIs(rmd.checksum, rmd.best_checksum())
self.assertEqual(
1, sum(rd.is_primary_sqlite() for rd in rmd.repodatas)
)
self.assertEqual(
1, sum(rd.is_primary_xml() for rd in rmd.repodatas)
)
for rd in rmd.repodatas:
# The currently checked-in test repos all use sha256, which
# seems to be the default for newer rpm tools.
self.assertEqual("sha256", rd.checksum.algorithm)
self.assertEqual(64, len(rd.checksum.hexdigest))
self.assertLess(0, rd.size)
self.assertLessEqual(rd.build_timestamp, rmd.build_timestamp)
self.assertLess(0, rd.build_timestamp)
self.assertIs(rd.checksum, rd.best_checksum())
|
from sketchpy import canvas
pen = canvas.sketch_from_svg('C:\\Users\\SHUBHAM\\Desktop\\New folder\\sketch\\4\\mehulnew.svg',scale= 250 )
pen.draw()
|
"""
GUI for the data operations panel (sum and multiply)
"""
import wx
import sys
import time
import numpy as np
from sas.sascalc.dataloader.data_info import Data1D
from sas.sasgui.plottools.PlotPanel import PlotPanel
from sas.sasgui.plottools.plottables import Graph
from sas.sasgui.plottools import transform
from matplotlib.font_manager import FontProperties
from sas.sasgui.guiframe.events import StatusEvent
from sas.sasgui.perspectives.calculator import calculator_widgets as widget
from sas.sasgui.guiframe.documentation_window import DocumentationWindow
#Control panel width
if sys.platform.count("win32") > 0:
PANEL_TOP = 0
PANEL_WIDTH = 790
PANEL_HEIGTH = 370
FONT_VARIANT = 0
_BOX_WIDTH = 200
ON_MAC = False
else:
PANEL_TOP = 60
_BOX_WIDTH = 230
PANEL_WIDTH = 900
PANEL_HEIGTH = 430
FONT_VARIANT = 1
ON_MAC = True
class DataOperPanel(wx.ScrolledWindow):
"""
"""
def __init__(self, parent, *args, **kwds):
kwds['name'] = "Data Operation"
kwds["size"] = (PANEL_WIDTH, PANEL_HEIGTH)
wx.ScrolledWindow.__init__(self, parent, *args, **kwds)
self.parent = parent
#sizers etc.
self.main_sizer = None
self.name_sizer = None
self.button_sizer = None
self.data_namectr = None
self.numberctr = None
self.data1_cbox = None
self.operator_cbox = None
self.data2_cbox = None
self.data_title_tcl = None
self.out_pic = None
self.equal_pic = None
self.data1_pic = None
self.operator_pic = None
self.data2_pic = None
self.output = None
self._notes = None
#text grayed color
self.color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BACKGROUND)
#data
self._data = self.get_datalist()
self._do_layout()
self.fill_data_combox()
self.fill_oprator_combox()
self.Bind(wx.EVT_SET_FOCUS, self.set_panel_on_focus)
def _define_structure(self):
"""
define initial sizer
"""
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
title = "Data Operation "
title += "[ + (add); - (subtract); "
title += "* (multiply); / (divide); "
title += "| (append) ]"
name_box = wx.StaticBox(self, -1, title)
self.name_sizer = wx.StaticBoxSizer(name_box, wx.HORIZONTAL)
self.button_sizer = wx.BoxSizer(wx.HORIZONTAL)
def _layout_name(self):
"""
Do the layout for data name related widgets
"""
new_data_sizer = wx.BoxSizer(wx.VERTICAL)
equal_sizer = wx.BoxSizer(wx.VERTICAL)
old_data1_sizer = wx.BoxSizer(wx.VERTICAL)
operator_sizer = wx.BoxSizer(wx.VERTICAL)
old_data2_sizer = wx.BoxSizer(wx.VERTICAL)
data2_hori_sizer = wx.BoxSizer(wx.HORIZONTAL)
data_name = wx.StaticText(self, -1, 'Output Data Name')
equal_name = wx.StaticText(self, -1, ' =', size=(50, 25))
data1_name = wx.StaticText(self, -1, 'Data1')
operator_name = wx.StaticText(self, -1, 'Operator')
data2_name = wx.StaticText(self, -1, 'Data2 (or Number)')
self.data_namectr = wx.TextCtrl(self, -1, size=(_BOX_WIDTH, 25), style=wx.TE_PROCESS_ENTER)
self.data_namectr.SetToolTipString("Hit 'Enter' key after typing.")
self.data_namectr.SetValue(str('MyNewDataName'))
self.numberctr = wx.TextCtrl(self, -1, size=(_BOX_WIDTH / 3, 25), style=wx.TE_PROCESS_ENTER)
self.numberctr.SetToolTipString("Hit 'Enter' key after typing.")
self.numberctr.SetValue(str(1.0))
self.data1_cbox = wx.ComboBox(self, -1, size=(_BOX_WIDTH, 25),
style=wx.CB_READONLY)
self.operator_cbox = wx.ComboBox(self, -1, size=(70, 25),
style=wx.CB_READONLY)
operation_tip = "Add: +, Subtract: -, "
operation_tip += "Multiply: *, Divide: /, "
operation_tip += "Append(Combine): | "
self.operator_cbox.SetToolTipString(operation_tip)
self.data2_cbox = wx.ComboBox(self, -1, size=(_BOX_WIDTH * 2 / 3, 25),
style=wx.CB_READONLY)
self.out_pic = SmallPanel(self, -1, True,
size=(_BOX_WIDTH, _BOX_WIDTH),
style=wx.NO_BORDER)
self.equal_pic = SmallPanel(self, -1, True, '=',
size=(50, _BOX_WIDTH),
style=wx.NO_BORDER)
self.data1_pic = SmallPanel(self, -1, True,
size=(_BOX_WIDTH, _BOX_WIDTH),
style=wx.NO_BORDER)
self.operator_pic = SmallPanel(self, -1, True, '+',
size=(70, _BOX_WIDTH),
style=wx.NO_BORDER)
self.data2_pic = SmallPanel(self, -1, True,
size=(_BOX_WIDTH, _BOX_WIDTH),
style=wx.NO_BORDER)
for ax in self.equal_pic.axes:
ax.set_frame_on(False)
for ax in self.operator_pic.axes:
ax.set_frame_on(False)
new_data_sizer.AddMany([(data_name, 0, wx.LEFT, 3),
(self.data_namectr, 0, wx.LEFT, 3),
(self.out_pic, 0, wx.LEFT, 3)])
equal_sizer.AddMany([(13, 13), (equal_name, 0, wx.LEFT, 3),
(self.equal_pic, 0, wx.LEFT, 3)])
old_data1_sizer.AddMany([(data1_name, 0, wx.LEFT, 3),
(self.data1_cbox, 0, wx.LEFT, 3),
(self.data1_pic, 0, wx.LEFT, 3)])
operator_sizer.AddMany([(operator_name, 0, wx.LEFT, 3),
(self.operator_cbox, 0, wx.LEFT, 3),
(self.operator_pic, 0, wx.LEFT, 3)])
data2_hori_sizer.AddMany([(self.data2_cbox, 0, wx.LEFT, 0),
(self.numberctr, 0, wx.RIGHT, 0)])
old_data2_sizer.AddMany([(data2_name, 0, wx.LEFT, 3),
(data2_hori_sizer, 0, wx.LEFT, 3),
(self.data2_pic, 0, wx.LEFT, 3)])
self.name_sizer.AddMany([(new_data_sizer, 0, wx.LEFT | wx.TOP, 5),
(equal_sizer, 0, wx.TOP, 5),
(old_data1_sizer, 0, wx.TOP, 5),
(operator_sizer, 0, wx.TOP, 5),
(old_data2_sizer, 0, wx.TOP, 5)])
self.data2_cbox.Show(True)
self._show_numctrl(self.numberctr, False)
wx.EVT_TEXT_ENTER(self.data_namectr, -1, self.on_name)
wx.EVT_TEXT(self.numberctr, -1, self.on_number)
wx.EVT_COMBOBOX(self.data1_cbox, -1, self.on_select_data1)
wx.EVT_COMBOBOX(self.operator_cbox, -1, self.on_select_operator)
wx.EVT_COMBOBOX(self.data2_cbox, -1, self.on_select_data2)
def _show_numctrl(self, ctrl, enable=True):
"""
Show/Hide on Win
Enable/Disable on MAC
"""
if ON_MAC:
ctrl.Enable(enable)
children = ctrl.GetChildren()
if len(children) > 0:
                ctrl.GetChildren()[0].SetBackgroundColour(self.color)
if enable:
wx.EVT_TEXT_ENTER(self.numberctr, -1, self.on_number)
else:
if not ctrl.IsEnabled():
ctrl.Enable(True)
ctrl.Show(enable)
def on_name(self, event=None):
"""
On data name typing
"""
if event is not None:
event.Skip()
item = event.GetEventObject()
if item.IsEnabled():
self._set_textctrl_color(item, 'white')
else:
self._set_textctrl_color(item, self.color)
text = item.GetValue().strip()
self._check_newname(text)
def _check_newname(self, name=None):
"""
Check name ctr strings
"""
self.send_warnings('')
msg = ''
if name is None:
text = self.data_namectr.GetValue().strip()
else:
text = name
state_list = self.get_datalist().values()
name_list = []
for state in state_list:
if state.data is None:
theory_list = state.get_theory()
theory, _ = theory_list.values()[0]
d_name = str(theory.name)
else:
d_name = str(state.data.name)
name_list.append(d_name)
if text in name_list:
self._set_textctrl_color(self.data_namectr, 'pink')
msg = "DataOperation: The name already exists."
if len(text) == 0:
self._set_textctrl_color(self.data_namectr, 'pink')
msg = "DataOperation: Type the data name first."
if self._notes:
self.send_warnings(msg, 'error')
self.name_sizer.Layout()
self.Refresh()
def _set_textctrl_color(self, ctrl, color):
"""
Set TextCtrl color
"""
if ON_MAC:
children = ctrl.GetChildren()
if len(children) > 0:
children[0].SetBackgroundColour(color)
else:
ctrl.SetBackgroundColour(color)
self.name_sizer.Layout()
def on_number(self, event=None, control=None):
"""
On selecting Number for Data2
"""
self.send_warnings('')
item = control
if item is None and event is not None:
item = event.GetEventObject()
elif item is None:
raise ValueError("Event or control must be supplied")
text = item.GetValue().strip()
if self.numberctr.IsShown():
if self.numberctr.IsEnabled():
self._set_textctrl_color(self.numberctr, 'white')
try:
val = float(text)
pos = self.data2_cbox.GetCurrentSelection()
self.data2_cbox.SetClientData(pos, val)
except:
self._set_textctrl_color(self.numberctr, 'pink')
if event is None:
msg = "DataOperation: Number requires a float number."
self.send_warnings(msg, 'error')
return False
else:
self._set_textctrl_color(self.numberctr, self.color)
self.put_text_pic(self.data2_pic, content=str(val))
self.check_data_inputs()
if self.output is not None:
self.output.name = str(self.data_namectr.GetValue())
self.draw_output(self.output)
self.Refresh()
return True
def on_select_data1(self, event=None):
"""
On select data1
"""
self.send_warnings('')
item = event.GetEventObject()
pos = item.GetCurrentSelection()
data = item.GetClientData(pos)
if data is None:
content = "?"
self.put_text_pic(self.data1_pic, content)
else:
self.data1_pic.add_image(data)
self.check_data_inputs()
if self.output is not None:
self.output.name = str(self.data_namectr.GetValue())
self.draw_output(self.output)
def on_select_operator(self, event=None):
"""
On Select an Operator
"""
self.send_warnings('')
item = event.GetEventObject()
text = item.GetValue().strip()
self.put_text_pic(self.operator_pic, content=text)
self.check_data_inputs()
if self.output is not None:
self.output.name = str(self.data_namectr.GetValue())
self.draw_output(self.output)
def on_select_data2(self, event=None):
"""
On Selecting Data2
"""
self.send_warnings('')
item = event.GetEventObject()
text = item.GetValue().strip().lower()
self._show_numctrl(self.numberctr, text == 'number')
pos = item.GetCurrentSelection()
data = item.GetClientData(pos)
content = "?"
if not (self.numberctr.IsShown() and self.numberctr.IsEnabled()):
if data is None:
content = "?"
self.put_text_pic(self.data2_pic, content)
else:
self.data2_pic.add_image(data)
self.check_data_inputs()
else:
content = str(self.numberctr.GetValue().strip())
try:
content = float(content)
data = content
except:
self._set_textctrl_color(self.numberctr, 'pink')
content = "?"
data = None
item.SetClientData(pos, data)
if data is not None:
self.check_data_inputs()
self.put_text_pic(self.data2_pic, content)
if self.output is not None:
self.output.name = str(self.data_namectr.GetValue())
self.draw_output(self.output)
def put_text_pic(self, pic=None, content=''):
"""
Put text to the pic
"""
pic.set_content(content)
pic.add_text()
pic.draw()
def check_data_inputs(self):
"""
Check data1 and data2 whether or not they are ready for operation
"""
self._set_textctrl_color(self.data1_cbox, 'white')
self._set_textctrl_color(self.data2_cbox, 'white')
flag = False
pos1 = self.data1_cbox.GetCurrentSelection()
data1 = self.data1_cbox.GetClientData(pos1)
if data1 is None:
self.output = None
return flag
pos2 = self.data2_cbox.GetCurrentSelection()
data2 = self.data2_cbox.GetClientData(pos2)
if data2 is None:
self.output = None
return flag
if self.numberctr.IsShown():
if self.numberctr.IsEnabled():
self._set_textctrl_color(self.numberctr, 'white')
try:
float(data2)
if self.operator_cbox.GetValue().strip() == '|':
msg = "DataOperation: This operation can not accept "
msg += "a float number."
self.send_warnings(msg, 'error')
self._set_textctrl_color(self.numberctr, 'pink')
self.output = None
return flag
except:
msg = "DataOperation: Number requires a float number."
self.send_warnings(msg, 'error')
self._set_textctrl_color(self.numberctr, 'pink')
self.output = None
return flag
else:
self._set_textctrl_color(self.numberctr, self.color)
elif data1.__class__.__name__ != data2.__class__.__name__:
self._set_textctrl_color(self.data1_cbox, 'pink')
self._set_textctrl_color(self.data2_cbox, 'pink')
msg = "DataOperation: Data types must be same."
self.send_warnings(msg, 'error')
self.output = None
return flag
try:
self.output = self.make_data_out(data1, data2)
except:
self._check_newname()
self._set_textctrl_color(self.data1_cbox, 'pink')
self._set_textctrl_color(self.data2_cbox, 'pink')
msg = "DataOperation: %s" % sys.exc_value
self.send_warnings(msg, 'error')
self.output = None
return flag
return True
def make_data_out(self, data1, data2):
"""
Make a temp. data output set
"""
output = None
pos = self.operator_cbox.GetCurrentSelection()
operator = self.operator_cbox.GetClientData(pos)
try:
exec "output = data1 %s data2" % operator
except:
raise
return output
def draw_output(self, output):
"""
Draw output data(temp)
"""
out = self.out_pic
if output is None:
content = "?"
self.put_text_pic(out, content)
else:
out.add_image(output)
wx.CallAfter(self.name_sizer.Layout)
self.Layout()
self.Refresh()
def _layout_button(self):
"""
Do the layout for the button widgets
"""
self.bt_apply = wx.Button(self, -1, "Apply", size=(_BOX_WIDTH / 2, -1))
app_tip = "Generate the Data and send to Data Explorer."
self.bt_apply.SetToolTipString(app_tip)
self.bt_apply.Bind(wx.EVT_BUTTON, self.on_click_apply)
self.bt_help = wx.Button(self, -1, "HELP")
app_tip = "Get help on Data Operations."
self.bt_help.SetToolTipString(app_tip)
self.bt_help.Bind(wx.EVT_BUTTON, self.on_help)
self.bt_close = wx.Button(self, -1, 'Close', size=(_BOX_WIDTH / 2, -1))
self.bt_close.Bind(wx.EVT_BUTTON, self.on_close)
self.bt_close.SetToolTipString("Close this panel.")
self.button_sizer.AddMany([(PANEL_WIDTH / 2, 25),
(self.bt_apply, 0, wx.RIGHT, 10),
(self.bt_help, 0, wx.RIGHT, 10),
(self.bt_close, 0, wx.RIGHT, 10)])
def _do_layout(self):
"""
Draw the current panel
"""
self._define_structure()
self._layout_name()
self._layout_button()
self.main_sizer.AddMany([(self.name_sizer, 0, wx.EXPAND | wx.ALL, 10),
(self.button_sizer, 0,
wx.EXPAND | wx.TOP | wx.BOTTOM, 5)])
self.SetSizer(self.main_sizer)
self.SetScrollbars(20, 20, 25, 65)
self.SetAutoLayout(True)
def set_panel_on_focus(self, event):
"""
On Focus at this window
"""
if event is not None:
event.Skip()
self._data = self.get_datalist()
if ON_MAC:
self.fill_data_combox()
else:
children = self.GetChildren()
# update the list only when it is on the top
if self.FindFocus() in children:
self.fill_data_combox()
def fill_oprator_combox(self):
"""
fill the current combobox with the operator
"""
operator_list = [' +', ' -', ' *', " /", " |"]
for oper in operator_list:
pos = self.operator_cbox.Append(str(oper))
self.operator_cbox.SetClientData(pos, str(oper.strip()))
self.operator_cbox.SetSelection(0)
def fill_data_combox(self):
"""
fill the current combobox with the available data
"""
pos_pre1 = self.data1_cbox.GetCurrentSelection()
pos_pre2 = self.data2_cbox.GetCurrentSelection()
current1 = self.data1_cbox.GetLabel()
current2 = self.data2_cbox.GetLabel()
if pos_pre1 < 0:
pos_pre1 = 0
if pos_pre2 < 0:
pos_pre2 = 0
self.data1_cbox.Clear()
self.data2_cbox.Clear()
if not self._data:
pos = self.data1_cbox.Append('No Data Available')
self.data1_cbox.SetSelection(pos)
self.data1_cbox.SetClientData(pos, None)
pos2 = self.data2_cbox.Append('No Data Available')
self.data2_cbox.SetSelection(pos2)
self.data2_cbox.SetClientData(pos2, None)
return
pos1 = self.data1_cbox.Append('Select Data')
self.data1_cbox.SetSelection(pos1)
self.data1_cbox.SetClientData(pos1, None)
pos2 = self.data2_cbox.Append('Select Data')
self.data2_cbox.SetSelection(pos2)
self.data2_cbox.SetClientData(pos2, None)
pos3 = self.data2_cbox.Append('Number')
val = None
if (self.numberctr.IsShown() and self.numberctr.IsEnabled()):
try:
val = float(self.numberctr.GetValue())
except:
val = None
self.data2_cbox.SetClientData(pos3, val)
dnames = []
ids = self._data.keys()
for id in ids:
if id is not None:
if self._data[id].data is not None:
dnames.append(self._data[id].data.name)
else:
theory_list = self._data[id].get_theory()
theory, _ = theory_list.values()[0]
dnames.append(theory.name)
ind = np.argsort(dnames)
if len(ind) > 0:
val_list = np.array(self._data.values())[ind]
for datastate in val_list:
data = datastate.data
if data is not None:
name = data.name
pos1 = self.data1_cbox.Append(str(name))
self.data1_cbox.SetClientData(pos1, data)
pos2 = self.data2_cbox.Append(str(name))
self.data2_cbox.SetClientData(pos2, data)
if str(current1) == str(name):
pos_pre1 = pos1
if str(current2) == str(name):
pos_pre2 = pos2
try:
theory_list = datastate.get_theory()
for theory, _ in theory_list.values():
th_name = theory.name
posth1 = self.data1_cbox.Append(str(th_name))
self.data1_cbox.SetClientData(posth1, theory)
posth2 = self.data2_cbox.Append(str(th_name))
self.data2_cbox.SetClientData(posth2, theory)
if str(current1) == str(th_name):
pos_pre1 = posth1
if str(current2) == str(th_name):
pos_pre2 = posth2
except:
continue
self.data1_cbox.SetSelection(pos_pre1)
self.data2_cbox.SetSelection(pos_pre2)
def get_datalist(self):
"""
"""
data_manager = self.parent.parent.get_data_manager()
if data_manager is not None:
return data_manager.get_all_data()
else:
return {}
def on_click_apply(self, event):
"""
Save the result as a new data object and send it to the data manager
"""
self.send_warnings('')
self.data_namectr.SetBackgroundColour('white')
state_list = self.get_datalist().values()
name = self.data_namectr.GetValue().strip()
name_list = []
for state in state_list:
if state.data is None:
theory_list = state.get_theory()
theory, _ = theory_list.values()[0]
d_name = str(theory.name)
else:
d_name = str(state.data.name)
name_list.append(d_name)
if name in name_list:
self._set_textctrl_color(self.data_namectr, 'pink')
msg = "The Output Data Name already exists... "
wx.MessageBox(msg, 'Error')
return
if name == '':
self._set_textctrl_color(self.data_namectr, 'pink')
msg = "Please type the output data name first... "
wx.MessageBox(msg, 'Error')
return
if self.output is None:
msg = "No Output Data has been generated... "
wx.MessageBox(msg, 'Error')
return
if self.numberctr.IsEnabled() and self.numberctr.IsShown():
valid_num = self.on_number(control=self.numberctr)
if not valid_num:
return
# send data to data manager
self.output.name = name
self.output.run = "Data Operation"
self.output.instrument = "SasView"
self.output.id = str(name) + str(time.time())
data = {self.output.id :self.output}
self.parent.parent.add_data(data)
self.name_sizer.Layout()
self.Refresh()
#must post event here
event.Skip()
def on_help(self, event):
"""
Bring up the Data Operations Panel Documentation whenever
the HELP button is clicked.
Calls DocumentationWindow with the path of the location within the
documentation tree (after /doc/ ...). Note that when using old
versions of Wx (before 2.9), and thus not the release version of the
installers, the help comes up at the top level of the file because
webbrowser does not pass anything past the # to the browser when it is
running "file:///...."
:param event: Triggers on clicking the help button
"""
_TreeLocation = "user/sasgui/perspectives/calculator/"
_TreeLocation += "data_operator_help.html"
_doc_viewer = DocumentationWindow(self, -1, _TreeLocation, "",
"Data Operation Help")
def disconnect_panels(self):
"""
"""
self.out_pic.connect.disconnect()
self.equal_pic.connect.disconnect()
self.data1_pic.connect.disconnect()
self.operator_pic.connect.disconnect()
self.data2_pic.connect.disconnect()
def on_close(self, event):
"""
leave data as it is and close
"""
self.parent.OnClose()
def set_plot_unfocus(self):
"""
Unfocus on right click
"""
def send_warnings(self, msg='', info='info'):
"""
Send warning to status bar
"""
wx.PostEvent(self.parent.parent, StatusEvent(status=msg, info=info))
class SmallPanel(PlotPanel):
"""
PlotPanel for Quick plot and masking plot
"""
def __init__(self, parent, id= -1, is_number=False, content='?', **kwargs):
"""
"""
PlotPanel.__init__(self, parent, id=id, **kwargs)
self.is_number = is_number
self.content = content
self.point = None
self.position = (0.4, 0.5)
self.scale = 'linear'
self.prevXtrans = "x"
self.prevYtrans = "y"
self.viewModel = "--"
self.subplot.set_xticks([])
self.subplot.set_yticks([])
self.add_text()
self.figure.subplots_adjust(left=0.1, bottom=0.1)
def set_content(self, content=''):
"""
Set text content
"""
self.content = str(content)
def add_toolbar(self):
"""
Add toolbar
"""
# Not implemented
pass
def on_set_focus(self, event):
"""
Send the current panel on focus to the parent
"""
pass
def add_image(self, plot):
"""
Add Image
"""
self.content = ''
self.textList = []
self.plots = {}
self.clear()
self.point = plot
try:
self.figure.delaxes(self.figure.axes[0])
self.subplot = self.figure.add_subplot(111)
#self.figure.delaxes(self.figure.axes[1])
except:
pass
try:
name = plot.name
except:
name = plot.filename
self.plots[name] = plot
#init graph
self.graph = Graph()
#add plot
self.graph.add(plot)
#draw
self.graph.render(self)
try:
self.figure.delaxes(self.figure.axes[1])
except:
pass
self.subplot.figure.canvas.resizing = False
self.subplot.tick_params(axis='both', labelsize=9)
# Draw zero axis lines
self.subplot.axhline(linewidth=1, color='r')
self.subplot.axvline(linewidth=1, color='r')
self.erase_legend()
try:
# mpl >= 1.1.0
self.figure.tight_layout()
except:
self.figure.subplots_adjust(left=0.1, bottom=0.1)
self.subplot.figure.canvas.draw()
def add_text(self):
"""
Text in the plot
"""
if not self.is_number:
return
self.clear()
try:
self.figure.delaxes(self.figure.axes[0])
self.subplot = self.figure.add_subplot(111)
self.figure.delaxes(self.figure.axes[1])
except:
pass
self.subplot.set_xticks([])
self.subplot.set_yticks([])
label = self.content
FONT = FontProperties()
xpos, ypos = (0.4, 0.5)
font = FONT.copy()
font.set_size(14)
self.textList = []
self.subplot.set_xlim((0, 1))
self.subplot.set_ylim((0, 1))
try:
if self.content != '?':
float(label)
except:
self.subplot.set_frame_on(False)
try:
# mpl >= 1.1.0
self.figure.tight_layout()
except:
self.figure.subplots_adjust(left=0.1, bottom=0.1)
if len(label) > 0 and xpos > 0 and ypos > 0:
new_text = self.subplot.text(str(xpos), str(ypos), str(label),
fontproperties=font)
self.textList.append(new_text)
def erase_legend(self):
"""
Remove Legend
"""
#for ax in self.axes:
self.remove_legend(self.subplot)
def onMouseMotion(self, event):
"""
Disable dragging 2D image
"""
def onWheel(self, event):
"""
"""
def onLeftDown(self, event):
"""
Disables LeftDown
"""
def onPick(self, event):
"""
Remove Legend
"""
for ax in self.axes:
self.remove_legend(ax)
def draw(self):
"""
Draw
"""
if self.dimension == 3:
pass
else:
self.subplot.figure.canvas.resizing = False
self.subplot.tick_params(axis='both', labelsize=9)
self.erase_legend()
self.subplot.figure.canvas.draw_idle()
try:
self.figure.delaxes(self.figure.axes[1])
except:
pass
def onContextMenu(self, event):
"""
Default context menu for a plot panel
"""
id = wx.NewId()
slicerpop = wx.Menu()
data = self.point
if issubclass(data.__class__, Data1D):
slicerpop.Append(id, '&Change Scale')
wx.EVT_MENU(self, id, self._onProperties)
else:
slicerpop.Append(id, '&Toggle Linear/Log Scale')
wx.EVT_MENU(self, id, self.ontogglescale)
try:
# mouse event
pos_evt = event.GetPosition()
pos = self.ScreenToClient(pos_evt)
except:
# toolbar event
pos_x, pos_y = self.toolbar.GetPositionTuple()
pos = (pos_x, pos_y + 5)
self.PopupMenu(slicerpop, pos)
def ontogglescale(self, event):
"""
On toggle 2d scale
"""
self._onToggleScale(event)
try:
# mpl >= 1.1.0
self.figure.tight_layout()
except:
self.figure.subplots_adjust(left=0.1, bottom=0.1)
try:
self.figure.delaxes(self.figure.axes[1])
except:
pass
def _onProperties(self, event):
"""
When clicking on Properties in the context menu,
the Property dialog is displayed.
The user selects a transformation for the x or y values and
a new plot is displayed.
"""
list = []
list = self.graph.returnPlottable()
if len(list.keys()) > 0:
first_item = list.keys()[0]
if first_item.x != []:
from sas.sasgui.plottools.PropertyDialog import Properties
dial = Properties(self, -1, 'Change Scale')
# type of view or model used
dial.xvalue.Clear()
dial.yvalue.Clear()
dial.view.Clear()
dial.xvalue.Insert("x", 0)
dial.xvalue.Insert("log10(x)", 1)
dial.yvalue.Insert("y", 0)
dial.yvalue.Insert("log10(y)", 1)
dial.view.Insert("--", 0)
dial.view.Insert("Linear y vs x", 1)
dial.setValues(self.prevXtrans, self.prevYtrans, self.viewModel)
dial.Update()
if dial.ShowModal() == wx.ID_OK:
self.xLabel, self.yLabel, self.viewModel = dial.getValues()
if self.viewModel == "Linear y vs x":
self.xLabel = "x"
self.yLabel = "y"
self.viewModel = "--"
dial.setValues(self.xLabel, self.yLabel, self.viewModel)
self._onEVT_FUNC_PROPERTY()
dial.Destroy()
def _onEVT_FUNC_PROPERTY(self, remove_fit=True):
"""
Receive the x and y transformation from myDialog,
Transforms x and y in View
and set the scale
"""
list = []
list = self.graph.returnPlottable()
# Changing the scale might be incompatible with
# currently displayed data (for instance, going
# from ln to log when all plotted values have
# negative natural logs).
# Go linear and only change the scale at the end.
self.set_xscale("linear")
self.set_yscale("linear")
_xscale = 'linear'
_yscale = 'linear'
for item in list:
item.setLabel(self.xLabel, self.yLabel)
# control axis labels from the panel itself
yname, yunits = item.get_yaxis()
xname, xunits = item.get_xaxis()
# Goes through all possible scales
if(self.xLabel == "x"):
item.transformX(transform.toX, transform.errToX)
self.graph._xaxis_transformed("%s" % xname, "%s" % xunits)
if(self.xLabel == "log10(x)"):
item.transformX(transform.toX_pos, transform.errToX_pos)
_xscale = 'log'
self.graph._xaxis_transformed("%s" % xname, "%s" % xunits)
if(self.yLabel == "y"):
item.transformY(transform.toX, transform.errToX)
self.graph._yaxis_transformed("%s" % yname, "%s" % yunits)
if(self.yLabel == "log10(y)"):
item.transformY(transform.toX_pos, transform.errToX_pos)
_yscale = 'log'
self.graph._yaxis_transformed("%s" % yname, "%s" % yunits)
item.transformView()
self.prevXtrans = self.xLabel
self.prevYtrans = self.yLabel
self.set_xscale(_xscale)
self.set_yscale(_yscale)
self.draw()
class DataOperatorWindow(widget.CHILD_FRAME):
def __init__(self, parent, manager, *args, **kwds):
kwds["size"] = (PANEL_WIDTH, PANEL_HEIGTH)
widget.CHILD_FRAME.__init__(self, parent, *args, **kwds)
self.parent = parent
self.manager = manager
self.panel = DataOperPanel(parent=self)
wx.EVT_CLOSE(self, self.OnClose)
self.SetPosition((wx.LEFT, PANEL_TOP))
self.Show()
def OnClose(self, event=None):
"""
On close event
"""
if self.manager is not None:
self.manager.data_operator_frame = None
self.panel.disconnect_panels()
self.Destroy()
if __name__ == "__main__":
app = wx.App()
widget.CHILD_FRAME = wx.Frame
window = DataOperatorWindow(parent=None, manager=None, title="Data Editor")
app.MainLoop()
|
"""basic-types.py
Module for creating and running the basic types classifier.
Usage
$ python3 basic-types.py --train-semcor
Train a classifier from all of Semcor and save it in
../data/classifier-all.pickle.
$ python3 basic-types.py --train-test
Train a classifier from a fragment of Semcor (two files) and save it in
../data/classifier-002.pickle, for testing and debugging purposes.
$ python3 basic-types.py --test
Test the classifier on a feature set and test the evaluation code.
$ python3 basic-types.py --classify-file FILENAME
Run the classifier on filename, output will be written to the terminal.
$ python3 basic-types.py --classify-spv1
Run the classifier on all SPV1 files, output will be written to the out/
directory.
The last two invocations both require the NLTK CoreNLPDependencyParser which
assumes that the Stanford CoreNLP server is running at port 9000. To use the
server run the following from the corenlp directory:
$ java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -preload tokenize,ssplit,pos,lemma,depparse -status_port 9000 -port 9000 -timeout 15000
Note that this invocation does not allow browser access to port 9000 because
the homepage uses an annotator that is not loaded by the above command.
"""
import os, sys, csv, getopt, pickle, codecs, json, glob
import nltk
from nltk.parse.corenlp import CoreNLPDependencyParser
from nltk.stem import WordNetLemmatizer
from semcor import Semcor, SemcorFile
SC_SENT = '../data/semcor.sent.tsv'
SC_TOKEN_FT = '../data/semcor.token.tsv'
SC_TOKEN_FT_SMALL = '../data/semcor.token.tsv.10000'
SC_TOKEN_DEP = '../data/semcor.token.fv'
def data_prep(file):
with open(file) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
data = [row for row in reader]
return data
def extract_types(sc):
"""
Returns a list of types from each wordform in semcor as well as a mapping
from integers to tokens.
"""
types = []
mapping = {}
i = 0
sen_list = [file.get_sentences() for file in sc.files]
for list in sen_list:
for item in list:
wf = item.wfs
for form in wf:
i += 1
if form.is_word_form():
if form.lemma is not None:
mapping.update({str(i): form.lemma})
else:
mapping.update({str(i): form.text})
if form.synset is not None:
types.append(form.synset.btypes)
else:
types.append(None)
else:
mapping.update({str(i): form.text})
types.append(None)
return types, mapping
def feature_set(types, token_features, mapping):
mapped = zip(token_features, types)
feature_set = []
for i in mapped:
if i[1] is not None:
features = i[0]
# indices: [1] token_id, [2] sent_id, [3] token_no, [4] surface, [5] lemma, [6] pos, [7] sense_no, [8] sense_key, [9] ssid,
# [10] int_dom_token_no, [11] dom_token_id, [12] rel
if features[5] != 'VB':
feature_dict = {
"surface" : features[3],
"lemma" : features[4],
"pos" : features[5],
"sense_no" : features[6],
"sense_key" : features[7],
"ssid" : features[8],
"rel" : features[11],
}
if features[9] != '0':
feature_dict.update({
"int_dom_token": mapping[features[9]],
"dom_token": mapping[features[10]]
})
else:
feature_dict.update({
"int_dom_token": None,
"dom_token": None
})
# print((feature_dict, i[1]))
feature_set.append((feature_dict, i[1]))
return feature_set
def split_data(feature_set):
index = int(len(feature_set) * .8)
training_set, test_set = feature_set[:index], feature_set[index:]
return training_set, test_set
def train_classifier(training_set):
classifier = nltk.NaiveBayesClassifier.train(training_set)
return classifier
def save_classifier(classifier, name):
filename = '../data/classifier-%s.pickle' % name
print("Saving %s" % filename)
with open(filename, 'wb') as fh:
pickle.dump(classifier, fh)
def load_classifier(name):
filename = '../data/classifier-%s.pickle' % name
print("Loading %s" % filename)
with open(filename, 'rb') as fh:
classifier = pickle.load(fh)
return classifier
def save_test_features(features, name):
filename = '../data/test-features-%s.pickle' % name
print("Saving %s" % filename)
with open(filename, 'wb') as fh:
pickle.dump(features, fh)
def load_test_features(name):
filename = '../data/test-features-%s.pickle' % name
print("Loading %s" % filename)
with open(filename, 'rb') as fh:
features = pickle.load(fh)
return features
def evaluate_classifier(classifier, test_set):
"""
:param classifier: classifier that has been trained on training set.
:param test_set: 20% of the featureset which includes features and a label.
:return: percentage accuracy of the classifier being able to label the data correctly based on features.
"""
accuracy = nltk.classify.accuracy(classifier, test_set)
return accuracy
def print_type_count(types):
count = len([t for t in types if t is not None])
print("Total number of types: %d" % len(types))
print("Number of non-nil types: %d" % count)
def train_test():
"""Train a model on the first 2 files of Semcor, using the partial feature file
SC_TOKEN_FT_SMALL, this evaluates at 0.8808. Model and test features are
written to ../data."""
semcor = Semcor(2)
_train(semcor, SC_TOKEN_FT_SMALL, '002')
def train():
"""Train a model on all of Semcor, using the full feature file SC_TOKEN_FT, this
evaluates at 0.9334. Model and test features are written to ../data."""
semcor = Semcor()
_train(semcor, SC_TOKEN_FT, 'all')
def _train(semcor, features_in, model_name):
token_features = data_prep(features_in)
types_from_semcor, identifier2token = extract_types(semcor)
print_type_count(types_from_semcor)
feature_data = feature_set(types_from_semcor, token_features, identifier2token)
training_set, test_set = split_data(feature_data)
# maybe add an option to train on the entire set
classifier = train_classifier(training_set)
print("Labels: %s" % classifier.labels())
accuracy = evaluate_classifier(classifier, test_set)
print("Accuracy on test set is %.4f" % accuracy)
save_classifier(classifier, model_name)
save_test_features(test_set, model_name)
#classifier.show_most_informative_features(20)
def test_classifier(classifier_name, test_set):
# just run one set of features through it
print("Running classifier on one set of features")
classifier = load_classifier(classifier_name)
features = {'pos': 'NN', 'rel': 'nsubj',
'sense_key': '1:09:00::', 'ssid': '05808619', 'sense_no': '1',
'dom_token': 'produce', 'int_dom_token': 'produce',
'lemma': 'investigation', 'surface': 'investigation'}
print(classifier.classify(features))
print("Evaluating classifier")
test_set = load_test_features(test_set)
print(classifier.labels())
accuracy = evaluate_classifier(classifier, test_set)
print("Accuracy on test set is %.4f" % accuracy)
classifier.show_most_informative_features(20)
def run_classifier_on_file(fname_in, fname_out=None):
classifier = load_classifier('all')
lemmatizer = WordNetLemmatizer()
text = codecs.open(fname_in).read()
if fname_out is None:
fh_out = sys.stdout
else:
fh_out = codecs.open(fname_out, 'w')
sentences = nltk.sent_tokenize(text)
parser = CoreNLPDependencyParser(url='http://localhost:9000')
for sentence in sentences:
parses = parser.parse(nltk.word_tokenize(sentence))
for parse in parses:
for (gov, gov_pos), rel, (dep, dep_pos) in parse.triples():
if dep_pos in ('NN', 'NNS'):
lemma = lemmatizer.lemmatize(dep)
features = {'pos': dep_pos, 'rel': rel,
'lemma': lemma, 'surface': dep,
'dom_token': gov, 'int_dom_token': gov}
label = classifier.classify(features)
fh_out.write("%s\t%s\n" % (lemma, label))
print(lemma, label)
print('')
def run_classifier_on_string(classifier, lemmatizer, text, fname_out):
fh_out = codecs.open(fname_out, 'w')
sentences = nltk.sent_tokenize(text)
parser = CoreNLPDependencyParser(url='http://localhost:9000')
for sentence in sentences:
parses = parser.parse(nltk.word_tokenize(sentence))
for parse in parses:
for (gov, gov_pos), rel, (dep, dep_pos) in parse.triples():
if dep_pos in ('NN', 'NNS'):
lemma = lemmatizer.lemmatize(dep)
features = {'pos': dep_pos, 'rel': rel,
'lemma': lemma, 'surface': dep,
'dom_token': gov, 'int_dom_token': gov}
label = classifier.classify(features)
fh_out.write("%s\t%s\n" % (lemma, label))
def run_classifier_on_spv1():
classifier = load_classifier('all')
lemmatizer = WordNetLemmatizer()
fnames = glob.glob('/DATA/dtra/spv1-results-lif-ela/documents/*.json')
for fname in fnames[:2]:
try:
with codecs.open(fname) as fh:
json_object = json.load(fh)
text = json_object['text']
print(fname, len(text))
outfile = os.path.join('out', os.path.basename(fname))
run_classifier_on_string(classifier, lemmatizer, text, outfile)
except:
print('ERROR')
if __name__ == '__main__':
options = ['train-test', 'train-semcor', 'test', 'classify-file=', 'classify-spv1']
opts, args = getopt.getopt(sys.argv[1:], '', options)
for opt, val in opts:
if opt == '--train-test':
train_test()
elif opt == '--train-semcor':
train()
elif opt == '--test':
# test the classifier with the full model on the 002 test set, gives
# unrealistic results because the training data probably includes
# the test data, just here to see whether the mechanism works
test_classifier('all', '002')
elif opt == '--classify-file':
filename = val
run_classifier_on_file(filename)
elif opt == '--classify-spv1':
run_classifier_on_spv1()
|
import datetime as dt
import glob
import numpy as np
try:
#for python 3.0 or later
from urllib.request import urlopen
except ImportError:
#Fall back to python 2 urllib2
from urllib2 import urlopen
import os
from format_temps import format_file
files = glob.glob('../txtout/*txt')
for i in files:
x = np.loadtxt(i,dtype={'names':('file','time','pass','fivsig','exptime'),'formats':('S35','S20','i1','i4','f8')},skiprows=2)
time = x['time'].astype('S10')
utime = np.unique(time)
for p in utime:
for j in np.arange(-2,2):
day = dt.datetime.strptime(p.decode('utf-8'),'%Y/%m/%d')+dt.timedelta(days=int(j))
t = day.strftime('%Y%m%d')
fname= '{0}_iris_temp.txt'.format(t)
if os.path.isfile(fname):
continue
else:
res = urlopen('http://www.lmsal.com/~boerner/iris/temps/{0}'.format(fname))
dat = res.read()
# res.read() returns bytes under Python 3, so write in binary mode
fo = open(fname,'wb')
fo.write(dat)
fo.close()
format_file(fname)
|
""" Base classes for Electrical Optical (EO) calibration data
These classes define the interface between the transient data classes used
in EO test code, and the `astropy.table.Table` classes used for persistent
storage.
Specifically they provide ways to define table structures in schema, and the
use those schema to facilitate backwards compatibility.
"""
import sys
from typing import Mapping
from collections import OrderedDict
from astropy.table import Table, Column
__all__ = ["EoCalibField", "EoCalibTableSchema", "EoCalibTable", "EoCalibTableHandle"]
class EoCalibField:
""" Defines a single field and provide the information needed to connect
a Class attribute (e.g., self.aVariable) to an
`astropy.table.Column` (e.g., 'VARIABLE')
Parameters
----------
name : `str`
Name of the column. In UPPER by convention.
dtype : `type`
Data type for elements in the Column. E.g., `float` or `int`.
shape : `list`
Define the shape of each element in the Column.
List elements can be either `int` or `str`
`str` elements will be replaced with `int` at construction using keywords
kwds : `dict` [`str`, `Any`]
These will be passed to `astropy.table.Column` constructor.
Notes
-----
This class should be used as class attribute in defining table schema
classes, I.e., it should only ever appear as a class attribute in a
sub-class of `EoCalibTableSchema`
"""
@staticmethod
def _format_shape(shape, **kwargs):
""" Format the list of shape elements, replacing any `str` using
keywords
Parameters
----------
shape : `list`
Define the shape of each element in the Column.
kwargs : `dict` [`str`, `Any`]
Used to replace the str elements in shape
Returns
-------
outShape : `tuple` [`int`]
The shape of each element in the column
"""
outShape = []
for axis in shape:
if isinstance(axis, int):
outShape.append(axis)
continue
if isinstance(axis, str):
try:
outShape.append(kwargs[axis])
continue
except KeyError as msg: # pragma: no cover
raise KeyError("Failed to convert EoCalibField column shape %s." % str(shape)) from msg
raise TypeError("Axis shape items must be either int or str, not %s" % type(axis)) # pragma: no cover # noqa
return tuple(outShape)
def __init__(self, **kwargs):
""" C'tor, Fills class parameters """
kwcopy = kwargs.copy()
self._name = kwcopy.pop('name')
self._dtype = kwcopy.pop('dtype', float)
self._shape = kwcopy.pop('shape', [1])
self._kwds = kwcopy
@property
def name(self):
""" Name of the Column. """
return self._name
@property
def dtype(self):
""" Data type for elements in the Column. """
return self._dtype
@property
def shape(self):
""" Template shape of each element in the Column. """
return self._shape
@property
def kwds(self):
""" Remaining keywords passed to Column constructor. """
return self._kwds
def validateColumn(self, column):
""" Check that a column matches the definition.
Raises
------
ValueError : Column data type does not match definition.
"""
if 'unit' in self._kwds:
column.unit = self._kwds['unit']
if 'description' in self._kwds:
column.description = self._kwds['description']
# if column.dtype.type != self._dtype:
# raise ValueError("Column %s data type not equal to schema data type %s != %s" % # noqa
# (column.name, column.dtype.type, self._dtype))
def validateValue(self, value):
""" Check that a value matches the definition and can be
used to fill a column.
Raises
------
ValueError : value data type does not match definition.
"""
# if value.dtype.type != self._dtype:
# raise ValueError("Item %s data type not equal to schema data type %s != %s" % # noqa
# (self._name, type(value), self._dtype))
def makeColumn(self, **kwargs):
""" Construct and return an `astropy.table.Column`
Notes
-----
Uses keyword arguments in two ways:
1. Replace `str` elements in the shape template
2. 'length' is used to set the column length
"""
return Column(name=self._name, dtype=self._dtype,
shape=self._format_shape(self._shape, **kwargs),
length=kwargs.get('length', 0),
**self._kwds)
def convertToValue(self, column, **kwargs):
""" Return data from column as a `numpy.array`
Keywords
--------
validate : `bool`
If true, will validate the column
"""
if kwargs.get('validate', False): # pragma: no cover
self.validateColumn(column)
return column.data
def convertToColumn(self, value, **kwargs):
""" Construct and return an `astropy.table.Column` from value.
Keywords
--------
validate : `bool`
If true, will validate the value
"""
if kwargs.get('validate', False): # pragma: no cover
self.validateValue(value)
return Column(name=self._name, dtype=self._dtype,
data=value, **self._kwds)
def writeMarkdownLine(self, varName, stream=sys.stdout):
""" Write a line of markdown describing self to stream
Parameters
----------
varName : `str`
The name of the variable associated to this field.
"""
md_dict = dict(varName=varName,
name=self._name,
dtype=self.dtype.__name__,
shape=self.shape,
unit="", description="")
md_dict.update(self._kwds)
tmpl = "| {varName} | {name} | {dtype} | {shape} | {unit} | {description} | \n".format(**md_dict)
stream.write(tmpl)
def copy(self, **kwargs):
""" Return an udpated copy of self using keyword to override fields """
kwcopy = dict(name=self._name, dtype=self._dtype, shape=self._shape)
kwcopy.update(self.kwds)
kwcopy.update(kwargs)
return EoCalibField(**kwcopy)
class EoCalibTableSchema:
""" Stores schema for a single `astropy.table.Table`
Each sub-class will define one version of the schema.
The naming convention for the sub-classes is:
{DataClassName}SchemaV{VERSION} e.g., 'EoTableDataSchemaV0'
Parameters
----------
TABLELENGTH : `str`
Name of the keyword to use to extract table length
fieldDict : `OrderedDict` [`str`, `EoCalibField`]
Maps field names (e.g., 'aVariable') to EoCalibField objects
columnDict : `OrderedDict` [`str`, `str`]
Maps column names (e.g., 'VARIABLE') to field names
"""
TABLELENGTH = ""
@classmethod
def findFields(cls):
""" Find and return the EoCalibField objects in a class
Returns
-------
fields : `OrderedDict` [`str`, `EoCalibField`]
"""
theClasses = cls.mro()
fields = OrderedDict()
for theClass in theClasses:
for key, val in theClass.__dict__.items():
if isinstance(val, EoCalibField):
fields[key] = val
return fields
@classmethod
def fullName(cls):
""" Return the name of this class """
return cls.__name__
@classmethod
def version(cls):
""" Return the version number of this schema
This relies on the naming convention: {DataClassName}SchemaV{VERSION}
"""
cStr = cls.__name__
return int(cStr[cStr.find("SchemaV")+7:])
@classmethod
def dataClassName(cls):
""" Return the name of the associated data class
This relies on the naming convention: {DataClassName}SchemaV{VERSION}
"""
cStr = cls.__name__
return cStr[:cStr.find("SchemaV")]
def __init__(self):
""" C'tor, Fills class parameters """
self._fieldDict = self.findFields()
self._columnDict = OrderedDict([(val.name, key) for key, val in self._fieldDict.items()])
def validateTable(self, table):
""" Check that table matches this schema
Raises
------
KeyError : Columns names in table do not match schema
"""
unused = {key: True for key in self._fieldDict.keys()}
for col in table.columns:
try:
key = self._columnDict[col]
field = self._fieldDict[key]
unused.pop(key, None)
except KeyError as msg: # pragma: no cover
raise KeyError("Column %s in table is not defined in schema %s" %
(col.name, type(self))) from msg
field.validateColumn(table[col])
if unused: # pragma: no cover
raise KeyError("%s.validateTable() failed because some columns were not provided %s" %
(type(self), str(unused)))
def validateDict(self, dictionary):
""" Check that dictionary matches this schema
Raises
------
KeyError : dictionary keys in table do not match schema
"""
unused = {key: True for key in self._fieldDict.keys()}
for key, val in dictionary.items():
if key == 'meta':
continue
try:
field = self._fieldDict[key]
unused.pop(key, None)
except KeyError as msg: # pragma: no cover
raise KeyError("Column %s in table is not defined in schema %s" %
(key, type(self))) from msg
field.validateValue(val)
if unused: # pragma: no cover
raise KeyError("%s.validateDict() failed because some columns were not provided %s" %
(type(self), str(unused)))
def makeTable(self, **kwargs):
""" Make and return an `astropy.table.Table`
Notes
-----
keywords are used to define table length and element shapes
"""
kwcopy = kwargs.copy()
length = kwcopy.pop(self.TABLELENGTH, 0)
table = Table([val.makeColumn(length=length, **kwcopy) for val in self._fieldDict.values()])
table.meta['schema'] = self.fullName()
table.meta['name'] = kwcopy.pop('name', None)
table.meta['handle'] = kwcopy.pop('handle', None)
return table
def convertToTable(self, dictionary, **kwargs):
""" Convert dictionary to `astropy.table.Table` and return it
Keywords
--------
validate : `bool`
If true, will validate the columns
Raises
------
KeyError : dictionary keys in table do not match schema
"""
unused = {key: True for key in self._fieldDict.keys()}
columns = []
meta = None
for key, val in dictionary.items():
if key == 'meta':
meta = val
continue
try:
field = self._fieldDict[key]
unused.pop(key)
except KeyError as msg: # pragma: no cover
raise KeyError("Column %s in table is not defined in schema %s" %
(key, type(self))) from msg
columns.append(field.convertToColumn(val, **kwargs))
if unused:  # pragma: no cover
raise KeyError("%s.convertToTable() failed because some columns were not provided %s" %
(type(self), str(unused)))
table = Table(columns)
if meta:
table.meta.update(meta)
return table
def convertToDict(self, table, **kwargs):
""" Convert table to `OrderedDict` and return it
Keywords
--------
validate : `bool`
If true, will validate the columns
Raises
------
KeyError : column names in table do not match schema
"""
unused = {key: True for key in self._fieldDict.keys()}
outDict = OrderedDict()
for colName in table.columns:
try:
key = self._columnDict[colName]
field = self._fieldDict[key]
col = table[colName]
unused.pop(key)
except KeyError as msg: # pragma: no cover
raise KeyError("Column %s in table is not defined in schema %s" %
(colName, type(self))) from msg
outDict[key] = field.convertToValue(col, **kwargs)
if unused: # pragma: no cover
raise KeyError("%s.convertToDict() failed because some columns were not provided %s" %
(type(self), str(unused)))
outDict['meta'] = table.meta
return outDict
def writeMarkdown(self, name, stream=sys.stdout):
""" Write a table of markdown describing self to stream
Parameters
----------
name : `str`
Name of field associated to this schema
"""
stream.write("| Name | Class | Version | Length |\n")
stream.write("|-|-|-|-|\n")
stream.write("| %s | %s | %i | %s |\n" %
(name, self.dataClassName(), self.version(), self.TABLELENGTH))
stream.write("\n\n")
stream.write("| Name | Column | Datatype | Shape | Units | Description |\n")
stream.write("|-|-|-|-|-|-|\n")
for key, val in self._fieldDict.items():
val.writeMarkdownLine(key, stream)
stream.write("\n\n")
class EoCalibTable:
""" Provides interface between `astropy.table.Table` and
`EoCalibTableSchema`
Each sub-class will define all the versions of a particular
data table, and provide backward compatibility
to older versions of the schema.
Parameters
----------
SCHEMA_CLASS : `type`
Current schema class
PREVIOUS_SCHEMAS : `list` [`type`]
Previous schema classes
schema : `EoCalibTableSchema`
Schema for this data structure
table : `astropy.table.Table`
Table with actual data
Notes
-----
By default this class will construct a table using the current
version of the schema. However, it can also use older version,
e.g., when being constructed from a table being read from an
old file.
"""
SCHEMA_CLASS = EoCalibTableSchema
PREVIOUS_SCHEMAS = []
def __init__(self, data=None, **kwargs):
""" C'tor, Fills class parameters
Parameters
----------
data : `Union`, [`astropy.table.Table`, `None`]
If provided, the data used to build the table
If `None`, table will be constructed using shape parameters
taken for kwargs
Keywords
--------
schema : `EoCalibTableSchema`
If provided will override schema class
"""
kwcopy = kwargs.copy()
self._schema = kwcopy.pop('schema', self.SCHEMA_CLASS())
self._version = self._schema.version()
if isinstance(data, Table):
self._schema.validateTable(data)
self._table = data
elif data is None:
self._table = self._schema.makeTable(**kwcopy)
else: # pragma: no cover
raise TypeError("EoCalibTable input data must be None, Table or dict, not %s" % (type(data)))
@property
def table(self):
""" Return the underlying `astropy.table.Table` """
return self._table
@classmethod
def schema(cls):
""" Return an instance of the schema """
return cls.SCHEMA_CLASS()
@classmethod
def allSchemaClasses(cls):
""" Return a `list` of all the associated schema classes """
return [cls.SCHEMA_CLASS] + cls.PREVIOUS_SCHEMAS
@classmethod
def schemaDict(cls):
""" Return an `OrderedDict` of all the associated schema classes
mapped by class name """
return OrderedDict([(val.fullName(), val) for val in cls.allSchemaClasses()])
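# --- Illustrative sketch (continued), not part of the original module --------
# Pairing the hypothetical schema above with an EoCalibTable sub-class. Built
# with data=None, the table is created empty from the shape keywords; passing
# an existing astropy Table instead wraps and validates it.
class ExampleAmpTable(EoCalibTable):
    SCHEMA_CLASS = ExampleAmpTableSchemaV0
    PREVIOUS_SCHEMAS = []

def _exampleAmpTableUsage():
    """ Hypothetical usage: a 16-amplifier table with 10 exposures per row """
    calibTable = ExampleAmpTable(data=None, nAmp=16, nExposure=10)
    # calibTable.table is an astropy Table with 16 rows; each MEAN entry is a
    # length-10 float array, so calibTable.table['MEAN'].shape == (16, 10)
    return calibTable
# ------------------------------------------------------------------------------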
class EoCalibTableHandle:
""" Provide interface between an `EoCalibSchema` and the `EoCalibTable`
and `EoCalibTableSchema` objects that define it
This allows a particular `EoCalibData` class to have
1. Table of different types
2. Multiple tables of the same type, but with different names
Parameters
----------
tableName : `str`
Template for the name of the table.
Should include '{key}' if this handle is used for multiple tables
tableClass : `type`
`EoCalibTable` sub-type associated to the table.
schema : `EoCalibTableSchema`
Schema associated with the table
schemaDict : `OrderedDict` [`str`, `type`]
Dictionary mapping from class name to `EoCalibTableSchema` sub-class
multiKey : `Union` [`str`, `None`]
Name of keyword used to replace `{key}` when formatting the table name
"""
@staticmethod
def findTableMeta(tableObj, metaKey):
""" Find and return metaData from a table-like object
Parameters
----------
tableObj : `Union` [`astropy.table.Table`, `OrderedDict`]
The table-like object
metaKey : `str`
The key for the meta data field
Raises
------
TypeError : input object is wrong type.
"""
if isinstance(tableObj, Table):
if metaKey in tableObj.meta:
return tableObj.meta[metaKey]
return tableObj.meta[metaKey.upper()]
if isinstance(tableObj, Mapping):
return tableObj['meta'][metaKey]
raise TypeError("findTableMeta requires Table or Mapping, not %s" % type(tableObj)) # pragma: no cover # noqa
def __init__(self, **kwargs):
""" C'tor, Fills class parameters """
self._tableName = kwargs.get('tableName')
self._tableClass = kwargs.get('tableClass')
self._schema = self._tableClass.schema()
self._schemaDict = self._tableClass.schemaDict()
self._multiKey = kwargs.get('multiKey', None)
if self._multiKey is not None:
if self._tableName.find('{key}') < 0: # pragma: no cover
raise ValueError("EoCalibTableHandle has _multiKey, but tableName does not contain '{key}'")
@property
def schema(self):
""" Return the associated schema """
return self._schema
@property
def multiKey(self):
""" Keyword used to replace `{key}` when formating table name
`None` means that this handle is assocatied to a single table.
"""
return self._multiKey
def getTableSchemaClass(self, tableObj):
""" Return the schema class associated to a table
Notes
-----
This uses the meta data field 'schema' to get the name
of the schema class
"""
tableSchemaName = self.findTableMeta(tableObj, "schema")
return self._schemaDict[tableSchemaName]
def validateTable(self, table):
""" Validate a table using schema """
tableSchema = self.getTableSchemaClass(table)()
tableSchema.validateTable(table)
def validateDict(self, dictionary):
""" Validate a dictionary using schema """
tableSchema = self.getTableSchemaClass(dictionary)()
tableSchema.validateDict(dictionary)
def convertToTable(self, dictionary):
""" Convert a dictionary to a table using schema """
tableSchema = self.getTableSchemaClass(dictionary)()
return tableSchema.convertToTable(dictionary)
def convertToDict(self, table):
""" Convert a table to a dictionary using schema """
tableSchema = self.getTableSchemaClass(table)()
return tableSchema.convertToDict(table)
def makeTables(self, **kwargs):
""" Build and return `OrderedDict` mapping table names to
newly created `astropy.table.Table` objects """
kwcopy = kwargs.copy()
if self._multiKey is None:
tableNames = [self._tableName]
else:
tableKeys = kwcopy.pop(self._multiKey, [])
tableNames = [self._tableName.format(key=tableKey) for tableKey in tableKeys]
return OrderedDict([(tableName, self._tableClass(name=tableName, **kwcopy))
for tableName in tableNames])
def makeEoCalibTable(self, table):
""" Convert table to `EoCalibTable` by attaching the correct schema """
tableSchema = self.getTableSchemaClass(table)()
tableName = self.findTableMeta(table, 'name')
return self._tableClass(name=tableName, schema=tableSchema, data=table)
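# --- Illustrative sketch (continued), not part of the original module --------
# A handle tying the hypothetical ExampleAmpTable sketched above to a templated
# table name; with multiKey set, makeTables() builds one table per entry of the
# 'segments' keyword (the keyword name is an assumption for this sketch).
def _exampleHandleUsage():
    handle = EoCalibTableHandle(tableName='amps_{key}',
                                tableClass=ExampleAmpTable,
                                multiKey='segments')
    tables = handle.makeTables(segments=['S00', 'S01'], nAmp=16, nExposure=10)
    # tables is an OrderedDict with keys 'amps_S00' and 'amps_S01', each value
    # an ExampleAmpTable whose underlying astropy Table has 16 rows
    return tables
# ------------------------------------------------------------------------------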
|
# Copyright [2018-2020] Peter Krenesky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ClassPropertyDescriptor(object):
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj) # pragma: no cover
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if not self.fset: # pragma: no cover
raise AttributeError("can't set attribute") # pragma: no cover
type_ = type(obj) # pragma: no cover
return self.fset.__get__(obj, type_)(value) # pragma: no cover
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
class cached_property:
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
methods. (e.g. url = cached_property(get_absolute_url, name='url') )
"""
def __init__(self, func, name=None):
self.func = func
self.__doc__ = getattr(func, "__doc__")
self.name = name or func.__name__
def __get__(self, instance, cls=None):
"""
Call the function and put the return value in instance.__dict__ so that
subsequent attribute access on the instance returns the cached value
instead of calling cached_property.__get__().
"""
if instance is None:
return self # pragma: no cover
res = instance.__dict__[self.name] = self.func(instance)
return res
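# --- Illustrative sketch, not part of the original module --------------------
# Hypothetical usage of the two decorators defined above.
class _Example:
    _registry = {"default": "sqlite"}

    @classproperty
    def backend(cls):
        # resolved on the class itself: _Example.backend == "sqlite"
        return cls._registry["default"]

    @cached_property
    def expensive(self):
        # computed once per instance, then served from instance.__dict__
        return sum(range(1000))
# ------------------------------------------------------------------------------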
|
import numpy as np
from MLG.name import name, name_good
from MLG.Math import percentile
from MLG import paperpath, path
from astropy.table import Table
def create_Table(Analysis_result, Analysis_result5, Analysis_result_external = None, sort_epoch =False):
'''------------------------------------------------------------
Description:
Build an astropy table and the LaTeX tables summarising the
mass uncertainties for single-source events.
---------------------------------------------------------------
Input:
Analysis_result, Analysis_result5: results of the mass
determination (the '_10' and '_5' sets);
Analysis_result_external: optional additional results;
sort_epoch: if True, sort rows by epoch of closest approach
instead of relative mass error.
---------------------------------------------------------------
Output:
Writes 'result_single(_ext).vot' to the Data directory and the
corresponding LaTeX tables to paperpath.
------------------------------------------------------------'''
tab = None
lines = []
epoch = []
rel_err = []
lens5 = np.array([i[0].getId() for i in Analysis_result5[2]])
source5 = np.array([i[1].getId() for i in Analysis_result5[2]])
if Analysis_result_external is not None:
lens_ext = np.array([i[0].getId() for i in Analysis_result_external[2]])
source_ext = np.array([i[1].getId() for i in Analysis_result_external[2]])
external = True
else:
external = False
for i in range(len(Analysis_result[0])):
lens = Analysis_result[2][i][0]
s_ID1 = str(lens.getId())
s_name = name(lens.getId(), False, True)
Good_ = name_good(lens.getId())
if Good_ and lens.getMag() > 5:
s_count = '%s'%i
source = Analysis_result[2][i][1]
s_ID2 = str(source.getId())
s_TCA = '%.3f'%lens.getTca()
s_Mass = '%.2g'%lens.getMass()
if source.getPx() == 0: fivepar = '^{*}'
else: fivepar =''
mm_10 = np.array([Analysis_result[0][i][x][0][6] for x in range(len(Analysis_result[0][i]))\
if Analysis_result[0][i][x][0][4] != -999])
mp_10 = np.array([Analysis_result[0][i][x][0][7] for x in range(len(Analysis_result[0][i]))\
if Analysis_result[0][i][x][0][4] != -999])
mmm_10 = percentile(mm_10)
ppp_10 = percentile(mp_10)
delta_M_10_p = max(ppp_10[4],-mmm_10[3])
delta_M_10_m = min(ppp_10[3],-mmm_10[4])
delta_M_10 = max(ppp_10[1], -mmm_10[1])
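# The loop below finds the decimal place of the leading significant digit of
# delta_M_10; the error bounds are then rounded outward to that precision and
# the strings padded with trailing zeros so all values print with the same
# number of decimals (the same scheme is reused further down).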
c=0
while 10**(-c)>= delta_M_10 and c < 10:
c+=1
c+=1
s_delta_M_10_m = str(round(delta_M_10_m-0.49999999*10**(-c),c))
s_delta_M_10_p = str(round(delta_M_10_p+0.49999999*10**(-c),c))
s_delta_M_10 = str(round(delta_M_10+0.49999999*10**(-c),c))
while(len(s_delta_M_10_m) <= c): s_delta_M_10_m=s_delta_M_10_m+'0'
while(s_delta_M_10_m[-c-1]) != '.': s_delta_M_10_m=s_delta_M_10_m+'0'
while(len(s_delta_M_10_p) <= c): s_delta_M_10_p=s_delta_M_10_p+'0'
while(s_delta_M_10_p[-c-1]) != '.': s_delta_M_10_p=s_delta_M_10_p+'0'
while(len(s_delta_M_10) <= c): s_delta_M_10=s_delta_M_10+'0'
while(s_delta_M_10[-c-1]) != '.': s_delta_M_10=s_delta_M_10+'0'
s_delta_M_percent = '%.2g'%(delta_M_10/lens.getMass()*100)
s_delta_M_5_m = 'NONE'
s_delta_M_5_p = 'NONE'
s_delta_M_5 = 'NONE'
delta_M_5_test = 0
delta_M_5_p = -999
delta_M_5_m = -999
delta_M_5 = -999
which5 = np.where((lens5 == lens.getId()) & (source5 == source.getId()))[0]
if len(which5) == 1 :
k = which5[0]
mm_5 = np.array([Analysis_result5[0][k][x][0][6] for x in range(len(Analysis_result5[0][k]))\
if Analysis_result5[0][k][x][0][4] != -999])
mp_5 = np.array([Analysis_result5[0][k][x][0][7] for x in range(len(Analysis_result5[0][k]))\
if Analysis_result5[0][k][x][0][4] != -999])
mmm_5 = percentile(mm_5)
ppp_5 = percentile(mp_5)
delta_M_5_p = max(ppp_5[4],-mmm_5[3])
delta_M_5_m = min(ppp_5[3],-mmm_5[4])
delta_M_5 = max(ppp_5[1], -mmm_5[1])
delta_M_5_test = delta_M_5
c=0
while 10**(-c)>= delta_M_5 and c < 10:
c+=1
c+=1
s_delta_M_5_m = str(round(delta_M_5_m-0.49999999*10**(-c),c))
s_delta_M_5_p = str(round(delta_M_5_p+0.49999999*10**(-c),c))
s_delta_M_5 = str(round(delta_M_5+0.49999999*10**(-c),c))
while(len(s_delta_M_5_m) <= c): s_delta_M_5_m=s_delta_M_5_m+'0'
while(s_delta_M_5_m[-c-1]) != '.': s_delta_M_5_m=s_delta_M_5_m+'0'
while(len(s_delta_M_5_p) <= c): s_delta_M_5_p=s_delta_M_5_p+'0'
while(s_delta_M_5_p[-c-1]) != '.': s_delta_M_5_p=s_delta_M_5_p+'0'
while(len(s_delta_M_5) <= c): s_delta_M_5=s_delta_M_5+'0'
while(s_delta_M_5[-c-1]) != '.': s_delta_M_5=s_delta_M_5+'0'
if external:
s_delta_M_ext_m = 'NONE'
s_delta_M_ext_p = 'NONE'
s_delta_M_ext = 'NONE'
delta_M_ext_test = 0
which_ext = np.where((lens_ext == lens.getId()) & (source_ext == source.getId()))[0]
if len(which_ext) == 1 :
k = which_ext[0]
mm_ext = np.array([Analysis_result_external[0][k][x][0][6]\
for x in range(len(Analysis_result_external[0][k])) \
if Analysis_result_external[0][k][x][0][4] != -999])
mp_ext = np.array([Analysis_result_external[0][k][x][0][7] \
for x in range(len(Analysis_result_external[0][k])) \
if Analysis_result_external[0][k][x][0][4] != -999])
mmm_ext = percentile(mm_ext)
ppp_ext = percentile(mp_ext)
delta_M_ext_p = max(ppp_ext[4],-mmm_ext[3])
delta_M_ext_m = min(ppp_ext[3],-mmm_ext[4])
delta_M_ext = max(ppp_ext[1], -mmm_ext[1])
delta_M_ext_test = delta_M_ext
c=0
while 10**(-c)>= delta_M_ext and c < 10:
c+=1
c+=1
s_delta_M_ext_m = str(round(delta_M_ext_m-0.49999999*10**(-c),c))
s_delta_M_ext_p = str(round(delta_M_ext_p+0.49999999*10**(-c),c))
s_delta_M_ext = str(round(delta_M_ext+0.49999999*10**(-c),c))
while(len(s_delta_M_ext_m) <= c): s_delta_M_ext_m=s_delta_M_ext_m+'0'
while(s_delta_M_ext_m[-c-1]) != '.': s_delta_M_ext_m=s_delta_M_ext_m+'0'
while(len(s_delta_M_ext_p) <= c): s_delta_M_ext_p=s_delta_M_ext_p+'0'
while(s_delta_M_ext_p[-c-1]) != '.': s_delta_M_ext_p=s_delta_M_ext_p+'0'
while(len(s_delta_M_ext) <= c): s_delta_M_ext=s_delta_M_ext+'0'
while(s_delta_M_ext[-c-1]) != '.': s_delta_M_ext=s_delta_M_ext+'0'
s_delta_M_percent_ext = '%.2g'%(delta_M_ext/lens.getMass()*100)
#---------------------------
#create Astropy.table
if tab is None:
tab = Table(names = ['name', 'lens_id', 'source_id', 'fivepar', 'TCA', 'Mass',\
'deltaM_10','sigma_deltaM_10_+', 'sigma_deltaM_10_-',\
'deltaM_5' , 'sigma_deltaM_5_+','sigma_deltaM_5_-',\
'deltaM_ext' , 'sigma_deltaM_ext_+','sigma_deltaM_ext_-'],\
dtype = [object, np.int64, np.int64, np.bool_,np.float64, np.float64, np.float64, np.float64,\
np.float64, np.float64, np.float64, np.float64, np.float64, np.float64, np.float64])
tab.add_row([s_name,lens.getId(),source.getId(),source.getPx() != 0,lens.getTca(),lens.getMass(),\
delta_M_10, delta_M_10_p, delta_M_10_m,\
delta_M_5, delta_M_5_p, delta_M_5_m,\
delta_M_ext, delta_M_ext_p, delta_M_ext_m])
#---------------------------
#---------------------------
#element list for sorting
epoch.append(lens.getTca())
rel_err.append(delta_M_10/lens.getMass())
#---------------------------
#---------------------------
#create row in latex table
# \\(int\\) & \\(Name\\) & \\(ID1\\) & \\(ID2\\) & \\(T_CA\\) & \\(M_in\\)
# & \\(deltaM_plus^{sigma+}_{sigma-}\\) & \\(deltaM_minus^{sigma+}_{sigma-} \\) \\\\
line = '%s & \\(%s\\) & \\(%s%s\\) & \\(%s\\) & \\(%s\\) & \\(\\pm%s^{+%s}_{%s}\\) & \\(%s%s\\)'\
%(s_name,s_ID1, s_ID2,fivepar, s_TCA, s_Mass,\
s_delta_M_10, s_delta_M_10_p, s_delta_M_10_m, s_delta_M_percent,'\\%')
if s_delta_M_5_m == 'NONE' or delta_M_5_test > lens.getMass():
line = line + ' & '
else:
line = line + ' & \\(\\pm%s^{+%s}_{%s}\\)'% (s_delta_M_5, s_delta_M_5_p, s_delta_M_5_m)
if s_delta_M_ext_m == 'NONE' or delta_M_ext_test > lens.getMass():
line = line + ' & '
else:
line = line + ' & \\(%s%s\\)'%(s_delta_M_percent_ext,'\\%')
lines.append(line)
#---------------------------
else:
#---------------------------
#create Astropy.table
if tab is None:
tab = Table(names = ['name', 'lens_id', 'source_id', 'fivepar', 'TCA', 'Mass',\
'deltaM_10', 'sigma_deltaM_10_+', 'sigma_deltaM_10_-',\
'deltaM_5' , 'sigma_deltaM_5_+','sigma_deltaM_5_-'],\
dtype = [object, np.int64, np.int64, np.bool_,np.float64, np.float64, np.float64,\
np.float64, np.float64, np.float64, np.float64, np.float64])
tab.add_row([s_name,lens.getId(),source.getId(),source.getPx() != 0,lens.getTca(),lens.getMass(),\
delta_M_10, delta_M_10_p, delta_M_10_m, delta_M_5, delta_M_5_p, delta_M_5_m])
#---------------------------
#element in list for sorting
epoch.append(lens.getTca())
rel_err.append(delta_M_10/lens.getMass())
#---------------------------
#---------------------------
#create row in latex table
#'\\(int\\) & \\(Name\\) & \\(ID1\\) & \\(ID2\\) & \\(T_CA\\) & \\(M_in\\)
#& \\(deltaM_plus^{sigma+}_{sigma-}\\) & \\(deltaM_minus^{sigma+}_{sigma-} \\) \\\\'
line = '%s & \\(%s\\) & \\(%s%s\\) & \\(%s\\) & \\(%s\\) & \\(\\pm%s^{+%s}_{%s}\\) & \\(%s%s\\)'\
%(s_name,s_ID1, s_ID2,fivepar, s_TCA, s_Mass, s_delta_M_10, s_delta_M_10_p, s_delta_M_10_m, s_delta_M_percent,'\\%')
if s_delta_M_5_m == 'NONE' or delta_M_5_test > lens.getMass():
line = line + ' & '
else:
line = line + ' & \\(\\pm%s^{+%s}_{%s}\\)'% (s_delta_M_5, s_delta_M_5_p, s_delta_M_5_m)
line = line + ' \\\\'
lines.append(line)
#---------------------------
#sorting list
if sort_epoch:
lines = [x for _,x in sorted(zip(epoch,lines))]
rel_err = [x for _,x in sorted(zip(epoch,rel_err))]
epoch = [x for _,x in sorted(zip(epoch,epoch))]
else:
lines = [x for _,x in sorted(zip(rel_err,lines))]
epoch = [x for _,x in sorted(zip(rel_err,epoch))]
rel_err = [x for _,x in sorted(zip(rel_err,rel_err))]
#---------------------------
#---------------------------
#save Table
if external: tablename = 'result_single_ext'
else: tablename = 'result_single'
print('write table: ' + tablename+ '.vot')
print('write tex_table: ' + tablename+ '.txt')
tab.write(path + 'Data/' + tablename+ '.vot', format = 'votable',overwrite=True)
#---------------------------
#---------------------------
#writing Latex Table
f = open(paperpath+tablename+'.tex','w')
#Setup a Table
f.write('\\renewcommand*{\\arraystretch}{1.4}'+'\n')
f.write('\\begin{table*}[]' +'\n')
f.write('\\input{result_single_caption1}\n') # include caption
f.write('\\label{table:single}' +'\n')
f.write('\\tiny')
#Format and head Row
f.write('\\begin{tabular}{l|rrrrr|rr|r|}' +'\n')
f.write('\\# & Name-Lens & \\(DR2\\_ID\\)-Lens & \\(DR2\\_ID\\)-Source & \\(T_{CA}\\) & \\(M_{in}\\)')
f.write(' & \\(\\sigma\\,M_{10}\\) & \\(\\sigma\\,M_{10}/M_{in}\\) & \\(\\sigma\\,M_{5} \\)')
f.write(' \\\\\n')
f.write(' & & & & \\(\\mathrm{Jyear}\\) & \\(\\mathrm{M_{\\odot}}\\)')
f.write(' & \\(\\mathrm{M_{\\odot}}\\) & & \\(\\mathrm{M_{\\odot}}\\)')
#if external: f.write(' & \\(\\mathrm{M_{\\odot}}\\) \\\\\n')
f.write(' \\\\\n')
f.write('\\hline' +'\n')
#
c3 = 1 #row counter combined
c4 = 0 #row counter individual tables
if sort_epoch: #sorted by epoch
#write rows
for l in range(len(lines)):
if (epoch[l] <= 2019.5) & (rel_err[l] <=0.5):
if c4%10> 4: f.write('\\rowcolor{lightGray}\n')
ll = lines[l]
if external:
ll = ll.split('&')
ll.pop(-1)
ll = '&'.join(ll)
f.write('\\(%d\\) & '%c3 + ll+'\\\\\n')
c3 +=1
c4+=1
#Table end
f.write('\\hline' +'\n')
f.write('\\end{tabular}' + '\n' + '\\end{table*}')
#Setup second Table
f.write('\n'+'\n'+'\n'+'\n'+'\n'+'\\begin{table*}[]' +'\n')
f.write('\\input{result_single_caption2}\n')
f.write('\\label{table:single2}' +'\n')
f.write('\\tiny')
#Format and head Row
f.write('\\begin{tabular}{l|rrrrr|rr|r|}' +'\n')
f.write('\\# & Name-Lens & \\(DR2\\_ID\\)-Lens & \\(DR2\\_ID\\)-Source & \\(T_{CA}\\) & \\(M_{in}\\)')
f.write(' & \\(\\sigma\\,M_{10}\\) & \\(\\sigma\\,M_{10}/M_{in}\\)')
if external: f.write(' & \\(\\sigma\\,M_{obs}/M_{in}\\)\\\\\n')
else: f.write(' & \\(\\sigma\\,M_{5}\\) \\\\\n')
f.write(' & & & & \\(\\mathrm{Jyear}\\) & \\(\\mathrm{M_{\\odot}}\\)')
if external: f.write(' & \\(\\mathrm{M_{\\odot}}\\) & & \\\\\n')
else: f.write(' & \\(\\mathrm{M_{\\odot}}\\) & & \\(\\mathrm{M_{\\odot}}\\)\\\\\n')
f.write('\\hline' +'\n')
c4 = 0 #Row counter individual tables
#Write rows
for l in range(len(lines)):
if (epoch[l] > 2019.5) & (rel_err[l] <=0.5):
if c4%10> 4: f.write('\\rowcolor{lightGray}\n')
ll=lines[l]
if external:
ll = ll.split('&')
ll.pop(-2)
ll = '&'.join(ll)
f.write('\\(%d\\) & '%c3 + ll+'\\\\\n')
c3 +=1
c4+=1
#Table end
f.write('\\hline' +'\n')
f.write('\\end{tabular}' + '\n' + '\\end{table*}')
f.close()
else:
i = 0
c2 = [15,30,50,100,101,200]
for line in lines:
if c2[i]<= 50:
if c4%10> 4: f.write('\\rowcolor{lightGray}\n')
f.write('\\(%d\\) & '%c3 + line+'\\\\\n')
c3 +=1
c4+=1
if c < len(rel_err):
if rel_err[c]*100 > c2[i]:
while rel_err[c]*100 > c2[i]:
i+=1
if c2[i] == 50:
f.write('\\hline' +'\n')
f.write('\\end{tabular}' + '\n' + '\\end{table*}')
f.write('\n'+'\n'+'\n'+'\n'+'\n'+'\\begin{table*}[]' +'\n')
f.write('\\input{result_single_caption2}\n')
f.write('\\label{table:single2}' +'\n')
f.write('\\tiny')
if external: f.write('\\begin{tabular}{l|rrrrr|rr|r|r|}' +'\n')
else: f.write('\\begin{tabular}{l|rrrrr|rr|r|}' +'\n')
f.write('\\# & Name-Lens & \\(DR2\\_ID\\)-Lens & \\(DR2\\_ID\\)-Source')
f.write(' & \\(T_{CA}\\) & \\(M_{in}\\)')
f.write(' & \\(\\sigma\\,M_{10}\\) & \\(\\sigma\\,M_{10}/M_{in}\\) & \\(\\sigma\\,M_{5} \\)')
if external: f.write(' & \\(\\sigma\\,M_{obs} \\) \\\\\n')
else: f.write(' \\\\\n')
f.write(' & & & & \\(\\mathrm{Jyear}\\) & \\(\\mathrm{M_{\\odot}}\\) \\\\\n')
f.write(' & \\(\\mathrm{M_{\\odot}}\\) & & \\(\\mathrm{M_{\\odot}}\\) \\\\\n')
f.write('\\hline' +'\n')
c4 = 0
if c2[i] >= 100: break
c+=1
f.write('\\hline' +'\n')
f.write('\\end{tabular}' + '\n' + '\\end{table*}')
f.close()
def create_Table_multi(Analysis_multi_result):
'''------------------------------------------------------------
Description:
Build the LaTeX table for lenses with multiple background sources.
---------------------------------------------------------------
Input:
Analysis_multi_result = (Dat_single, Dat_all, Dat_fps, Dat_sig)
---------------------------------------------------------------
Output:
Writes 'result_multi.tex' to paperpath.
------------------------------------------------------------'''
Dat_single, Dat_all, Dat_fps, Dat_sig = Analysis_multi_result
q1 = []
q2 = []
for i in range(len(Dat_all[1])):
lens = Dat_all[2][i][0]
s_Mass = '%.2g'%lens.getMass()
lens_id = Dat_all[1][i]
s_lens_id = str(lens_id)
s_name = name(lens.getId(), False, True)
all_bool = True
fps_bool = False
sig_bool = False
for j in range(len(Dat_fps[1])):
if lens_id == Dat_fps[1][j]:
fps_bool = True
break
for k in range(len(Dat_sig[1])):
if lens_id == Dat_sig[1][k]:
sig_bool = True
break
Vs_delta_M_m = ['','','']
Vs_delta_M_p = ['','','']
Vs_delta_M = ['','','']
rel_err = ['','','']
for uu in range(3):
if [all_bool, fps_bool, sig_bool][uu]:
nn = [i,j,k][uu]
Analysis_result = [Dat_all, Dat_fps, Dat_sig][uu]
mm = np.array([Analysis_result[0][nn][x][0][6] for x in range(len(Analysis_result[0][nn])) if Analysis_result[0][nn][x][0][4] != -999])
mp = np.array([Analysis_result[0][nn][x][0][7] for x in range(len(Analysis_result[0][nn])) if Analysis_result[0][nn][x][0][4] != -999])
mmm = percentile(mm)
ppp = percentile(mp)
delta_M_p = max(ppp[4],-mmm[3])
delta_M_m = min(ppp[3],-mmm[4])
delta_M = max(ppp[1], -mmm[1])
c=0
while 10**(-c)>= delta_M and c < 10:
c+=1
c+=1
s_delta_M_m = str(round(delta_M_m-0.49999999*10**(-c),c))
s_delta_M_p = str(round(delta_M_p+0.49999999*10**(-c),c))
s_delta_M = str(round(delta_M+0.49999999*10**(-c),c))
while(len(s_delta_M_m) <= c): s_delta_M_m=s_delta_M_m+'0'
while(s_delta_M_m[-c-1]) != '.': s_delta_M_m=s_delta_M_m+'0'
while(len(s_delta_M_p) <= c): s_delta_M_p=s_delta_M_p+'0'
while(s_delta_M_p[-c-1]) != '.': s_delta_M_p=s_delta_M_p+'0'
while(len(s_delta_M) <= c): s_delta_M=s_delta_M+'0'
while(s_delta_M[-c-1]) != '.': s_delta_M=s_delta_M+'0'
Vs_delta_M_m[uu] = s_delta_M_m
Vs_delta_M_p[uu] = s_delta_M_p
Vs_delta_M[uu] = s_delta_M
if uu ==0:
rel_err1 = (delta_M/lens.getMass())
rel_err[uu] = '%.2g'%(delta_M/lens.getMass()*100)
# sort the source IDs into the 2-parameter, 5-parameter and sigma categories
ID_all = [x.getId() for x in Dat_all[2][i][1:]]
ID_fps = [x.getId() for x in Dat_fps[2][j][1:]]
ID_sig = [x.getId() for x in Dat_sig[2][k][1:]]
tps = []
fps = []
sig = []
for ID in ID_all:
if ID in ID_sig: sig.append(ID)
elif ID in ID_fps: fps.append(ID)
else: tps.append(ID)
q1.append([rel_err1,rel_err,s_Mass, s_lens_id,s_name, tps,fps ,sig, Vs_delta_M_m,Vs_delta_M_p,Vs_delta_M])
q1.sort(key=lambda x: x[0])
f = open(paperpath+'result_multi.tex','w')
f.write('\\renewcommand*{\\arraystretch}{1.4}' + '\n')
f.write('\\begin{sidewaystable*}[]' + '\n')
f.write('\\input{result_multi_caption1}\n')
f.write('\\label{table:multi}'+ '\n')
f.write('\\tiny\\begin{tabular}{l|r|rrrr|rr|rr|rr|}' + '\n')
f.write('\\# & Name-Lens & ' + '\n')
f.write('\\(DR2\\_ID\\)-Source & \\(DR2\\_ID\\)-Source' )
f.write(' & \\(DR2\\_ID\\)-Source' + '\n')
f.write(' & \\(M_{\mathrm{in}}\\) & \\(\\sigma\\,M_{\mathrm{all}}\\)')
f.write(' & \\(\\sigma\\,M_{\mathrm{all}}/M_{\mathrm{in}}\\) & \\(\\sigma\\,M_{\mathrm{5-par}} \\)')
f.write(' & \\(\\sigma\\,M_{\mathrm{5-par.}}/M_{in}\\) & \\(\\sigma\\,M_{\mathrm{sig.}} \\)')
f.write(' & \\(\\sigma\\,M_{\mathrm{sig.}}/M_{\mathrm{in}}\\) \\\\' + '\n')
f.write(' & \\(DR2\\_ID\\)-Lens & (2-parameter) & (5-parameter)& (sigma)')
f.write(' & \\(\\Msun{}\\) & \\(\\Msun{}\\) & \\(\\%\\)& \\(\\Msun{}\\) & \\(\\%\\)& \\(\\Msun{}\\) & \\(\\%\\)')
f.write(' \\\\\n')
f.write('\\hline' +'\n')
for n in range(len(q1)):
rel_err1,rel_err, s_Mass, s_lens_id,s_name, tps,fps,sig, Vs_delta_M_m,Vs_delta_M_p,Vs_delta_M =q1[n]
if rel_err1 < 0.5:
ii = 0
if s_name == '' :ll = ['\\(%s\\)'% s_lens_id,]
else: ll = [s_name, '\\(%s\\)'% s_lens_id]
for line_index in range(max([len(tps), len(fps), len(sig),len(ll)])):
if line_index >= len(tps): s_tps = ''
else: s_tps = str(tps[line_index])
if line_index >= len(fps): s_fps = ''
else: s_fps = str(fps[line_index])
if line_index >= len(sig): s_sig = ''
else: s_sig = str(sig[line_index])
if line_index >= len(ll): s_ln = ''
else: s_ln= str(ll[line_index])
if line_index == 0:
line = '%s & %s & \\(%s\\) & \\(%s\\) & \\(%s\\) & \\(%s\\) &'\
%(str(n+1), s_ln, s_tps,s_fps, s_sig, s_Mass)
if len(tps) == 0: line = line + ' & &'
else: line = line+ '\\(\\pm%s^{+%s}_{%s}\\) & \\(%s%s\\) &' \
% (Vs_delta_M[0], Vs_delta_M_p[0], Vs_delta_M_m[0], rel_err[0], '\\%')
if len(fps) == 0: line = line + ' & &'
else:
line = line+ '\\(\\pm%s^{+%s}_{%s}\\) &\\(%s%s\\) &' \
% (Vs_delta_M[1], Vs_delta_M_p[1], Vs_delta_M_m[1], rel_err[1],'\\%')
if len(sig) == 0: line = line + ' \\\\'
else:
line = line+ '\\(\\pm%s^{+%s}_{%s}\\) & \\(%s%s\\) \\\\'\
% (Vs_delta_M[2], Vs_delta_M_p[2], Vs_delta_M_m[2], rel_err[2],'\\%')
else:
line = ' &%s& \\(%s\\) & \\(%s\\) & \\(%s\\) & & & & & & & \\\\'\
% (s_ln, s_tps,s_fps, s_sig)
if n%2 : f.write('\\rowcolor{lightGray}\n')
f.write(line)
f.write('\n')
f.write('\\hline'+ '\n')
f.write('\\end{tabular}' +'\n' +'\\end{sidewaystable*}')
f.close() |
# -*- coding: utf-8 -*-
import codecs
from setuptools import setup
packages = \
['colour_hdri',
'colour_hdri.calibration',
'colour_hdri.calibration.tests',
'colour_hdri.exposure',
'colour_hdri.exposure.tests',
'colour_hdri.generation',
'colour_hdri.generation.tests',
'colour_hdri.models',
'colour_hdri.models.datasets',
'colour_hdri.models.tests',
'colour_hdri.plotting',
'colour_hdri.process',
'colour_hdri.process.tests',
'colour_hdri.recovery',
'colour_hdri.recovery.tests',
'colour_hdri.sampling',
'colour_hdri.sampling.tests',
'colour_hdri.tonemapping',
'colour_hdri.tonemapping.global_operators',
'colour_hdri.tonemapping.global_operators.tests',
'colour_hdri.utilities',
'colour_hdri.utilities.tests']
package_data = \
{'': ['*'],
'colour_hdri': ['examples/*',
'resources/colour-hdri-examples-datasets/*',
'resources/colour-hdri-tests-datasets/*']}
install_requires = \
['colour-science>=0.3.16,<0.4.0', 'recordclass']
extras_require = \
{'development': ['biblib-simple',
'coverage',
'coveralls',
'flake8',
'invoke',
'jupyter',
'mock',
'nose',
'pre-commit',
'pytest',
'restructuredtext-lint',
'sphinx<=3.1.2',
'sphinx_rtd_theme',
'sphinxcontrib-bibtex',
'toml',
'twine',
'yapf==0.23'],
'optional': ['colour-demosaicing', 'rawpy'],
'plotting': ['matplotlib'],
'read-the-docs': ['mock', 'numpy', 'sphinxcontrib-bibtex']}
setup(
name='colour-hdri',
version='0.1.8',
description='HDRI / Radiance image processing algorithms for Python',
long_description=codecs.open('README.rst', encoding='utf8').read(),
author='Colour Developers',
author_email='[email protected]',
maintainer='Colour Developers',
maintainer_email='[email protected]',
url='https://www.colour-science.org/',
packages=packages,
package_data=package_data,
install_requires=install_requires,
extras_require=extras_require,
python_requires='>=3.6,<4.0',
)
|
import os
import sys
import codecs
import argparse
from _pickle import load, dump
import collections
from utils import get_processing_word, is_dataset_tag, make_sure_path_exists, get_bmes
from fastNLP import Instance, DataSet, Vocabulary, Const
max_len = 0
def expand(x):
sent = ["<sos>"] + x[1:] + ["<eos>"]
return [x + y for x, y in zip(sent[:-1], sent[1:])]
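# Worked example (illustrative): with x = ["<task>", "天", "气"], sent becomes
# ["<sos>", "天", "气", "<eos>"], so expand(x) returns the character bigrams
# ["<sos>天", "天气", "气<eos>"] (the task tag stored in x[0] is dropped).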
def read_file(filename, processing_word=get_processing_word(lowercase=False)):
dataset = DataSet()
niter = 0
with codecs.open(filename, "r", "utf-8-sig") as f:
words, tags = [], []
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith("-DOCSTART-"):
if len(words) != 0:
assert len(words) > 2
if niter == 1:
print(words, tags)
niter += 1
dataset.append(Instance(ori_words=words[:-1], ori_tags=tags[:-1]))
words, tags = [], []
else:
word, tag = line.split()
word = processing_word(word)
words.append(word)
tags.append(tag.lower())
dataset.apply_field(lambda x: [x[0]], field_name="ori_words", new_field_name="task")
dataset.apply_field(
lambda x: len(x), field_name="ori_tags", new_field_name="seq_len"
)
dataset.apply_field(
lambda x: expand(x), field_name="ori_words", new_field_name="bi1"
)
return dataset
def main():
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument("--data_path", required=True, type=str, help="all of datasets pkl paths")
# fmt: on
options, _ = parser.parse_known_args()
train_set, test_set = DataSet(), DataSet()
input_dir = os.path.join(options.data_path, "joint-sighan2008/bmes")
options.output = os.path.join(options.data_path, "total_dataset.pkl")
print(input_dir, options.output)
for fn in os.listdir(input_dir):
if fn not in ["test.txt", "train-all.txt"]:
continue
print(fn)
abs_fn = os.path.join(input_dir, fn)
ds = read_file(abs_fn)
if "test.txt" == fn:
test_set = ds
else:
train_set = ds
print(
"num samples of total train, test: {}, {}".format(len(train_set), len(test_set))
)
uni_vocab = Vocabulary(min_freq=None).from_dataset(
train_set, test_set, field_name="ori_words"
)
# bi_vocab = Vocabulary(min_freq=3, max_size=50000).from_dataset(train_set,test_set, field_name="bi1")
bi_vocab = Vocabulary(min_freq=3, max_size=None).from_dataset(
train_set, field_name="bi1", no_create_entry_dataset=[test_set]
)
tag_vocab = Vocabulary(min_freq=None, padding="s", unknown=None).from_dataset(
train_set, field_name="ori_tags"
)
task_vocab = Vocabulary(min_freq=None, padding=None, unknown=None).from_dataset(
train_set, field_name="task"
)
def to_index(dataset):
uni_vocab.index_dataset(dataset, field_name="ori_words", new_field_name="uni")
tag_vocab.index_dataset(dataset, field_name="ori_tags", new_field_name="tags")
task_vocab.index_dataset(dataset, field_name="task", new_field_name="task")
dataset.apply_field(lambda x: x[1:], field_name="bi1", new_field_name="bi2")
dataset.apply_field(lambda x: x[:-1], field_name="bi1", new_field_name="bi1")
bi_vocab.index_dataset(dataset, field_name="bi1", new_field_name="bi1")
bi_vocab.index_dataset(dataset, field_name="bi2", new_field_name="bi2")
dataset.set_input("task", "uni", "bi1", "bi2", "seq_len")
dataset.set_target("tags")
return dataset
train_set = to_index(train_set)
test_set = to_index(test_set)
output = {}
output["train_set"] = train_set
output["test_set"] = test_set
output["uni_vocab"] = uni_vocab
output["bi_vocab"] = bi_vocab
output["tag_vocab"] = tag_vocab
output["task_vocab"] = task_vocab
print(tag_vocab.word2idx)
print(task_vocab.word2idx)
make_sure_path_exists(os.path.dirname(options.output))
print("Saving dataset to {}".format(os.path.abspath(options.output)))
with open(options.output, "wb") as outfile:
dump(output, outfile)
print(len(task_vocab), len(tag_vocab), len(uni_vocab), len(bi_vocab))
dic = {}
tokens = {}
def process(words):
name = words[0][1:-1]
if name not in dic:
dic[name] = set()
tokens[name] = 0
tokens[name] += len(words[1:])
dic[name].update(words[1:])
train_set.apply_field(process, "ori_words", None)
for name in dic.keys():
print(name, len(dic[name]), tokens[name])
with open(os.path.join(os.path.dirname(options.output), "oovdict.pkl"), "wb") as f:
dump(dic, f)
def get_max_len(ds):
global max_len
max_len = 0
def find_max_len(words):
global max_len
if max_len < len(words):
max_len = len(words)
ds.apply_field(find_max_len, "ori_words", None)
return max_len
print(
"train max len: {}, test max len: {}".format(
get_max_len(train_set), get_max_len(test_set)
)
)
if __name__ == "__main__":
main()
|
import numpy as np
from collections import namedtuple
import random
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class Schedule(object):
def __init__(self, n, start, stop):
self.n = n
self.start = start
self.stop = stop
def get_value(self, i):
val = self.start - i*(self.start - self.stop)/self.n
if val < self.stop:
return self.stop
return val
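# Worked example (illustrative): Schedule(n=100, start=1.0, stop=0.01).get_value(50)
# evaluates to 1.0 - 50*(1.0 - 0.01)/100 = 0.505, and any i >= 100 is clamped to 0.01.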
class QLearning(object):
def get_name(self):
return "qlearning"
def __init__(self,actions,params={}):
self.Qs = {}
self.alpha = 0.1
if 'alpha' in params.keys():
self.alpha=params['alpha']
self.exploration_fraction = 0.1
if 'epsfrac' in params.keys():
self.exploration_fraction = params['epsfrac']
self.total_episodes = int(params['num_episodes'])
self.EPS_END = 0.01
self.exploration = Schedule(int(self.exploration_fraction*self.total_episodes),
1.0,
self.EPS_END)
self.num_episodes = 0
self.num_actions = actions
self.traj = []
print("[QLEARNING] Initialized with parameters: alpha: %s, epsfrac: %s" % (str(self.alpha), str(self.exploration_fraction)), flush=True)
def select_action(self, x):
s = self.state_to_str(x)
if s not in self.Qs.keys():
self.Qs[s] = [0 for a in range(self.num_actions)]
eps_threshold = self.exploration.get_value(self.num_episodes)
sample = random.random()
if sample > eps_threshold:
Qvals = self.Qs[s]
action = np.random.choice(np.flatnonzero(Qvals == np.max(Qvals)))
else:
action = random.randrange(self.num_actions)
return (action)
def get_value(self,x):
s = self.state_to_str(x)
if s not in self.Qs.keys():
## This should only happen at the end of the episode so put 0 here.
self.Qs[s] = [0 for a in range(self.num_actions)]
Qvals = self.Qs[s]
return(np.max(Qvals))
def save_transition(self, state, action, reward, next_state):
self.traj.append(Transition(state, action, next_state, reward))
def finish_episode(self):
self.num_episodes += 1
for transition in self.traj:
x = transition.state
a = transition.action
r = transition.reward
xp = transition.next_state
s = self.state_to_str(x)
V = 0.0 if xp is None else self.get_value(xp)
Qvals = self.Qs[s]
Qvals[a] = (1-self.alpha)*Qvals[a] + self.alpha*(r + V)
self.traj = []
def state_to_str(self,x):
return("".join([str(z) for z in x]))
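# Usage sketch (illustrative; `env` is a hypothetical environment exposing reset()/step()):
#   agent = QLearning(actions=4, params={'alpha': 0.1, 'epsfrac': 0.1, 'num_episodes': 1000})
#   for _ in range(1000):
#       state, done = env.reset(), False
#       while not done:
#           action = agent.select_action(state)
#           next_state, reward, done = env.step(action)
#           agent.save_transition(state, action, reward, None if done else next_state)
#           state = next_state
#       agent.finish_episode()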
|
def inclusive_sum(n, m):
var = 0
for i in range(n, m+1):
var += i
return var
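# Worked example (illustrative): inclusive_sum(3, 6) iterates 3 + 4 + 5 + 6 = 18, which
# matches the closed form (n + m) * (m - n + 1) // 2 = 9 * 4 // 2 = 18.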
|
from django.db import models
from mls_api.models.base import BaseModel
class Team(BaseModel):
''' Soccer teams '''
name = models.CharField(max_length=128)
slug = models.SlugField(unique=True)
games = models.ManyToManyField('Game', through='GameTeam')
def __unicode__(self):
return u'%s' % self.name
@models.permalink
def get_absolute_url(self):
return ('team_detail', (), {'slug': self.slug})
def points_by_comp(self, competition):
return self.gameteam_set.filter(
            game__competition=competition
).aggregate(models.Sum('result__points'))['result__points__sum']
class Meta:
ordering = ('name',)
app_label = 'mls_api'
class GameTeam(BaseModel):
team = models.ForeignKey('Team')
game = models.ForeignKey('Game')
result = models.ForeignKey('Result', null=True)
home = models.BooleanField(default=False)
def __unicode__(self):
return self.team.__unicode__()
class Meta:
app_label = 'mls_api'
|
import nltk, textblob
text="""I. The Period
It was the best of times,
it was the worst of times,
it was the age of wisdom,
it was the age of foolishness,
it was the epoch of belief,
it was the epoch of incredulity,
it was the season of Light,
it was the season of Darkness,
it was the spring of hope,
it was the winter of despair,
we had everything before us,
we had nothing before us,
we were all going direct to Heaven,
we were all going direct the other way--
in short, the period was so far like the present period, that some of
its noisiest authorities insisted on its being received, for good or for
evil, in the superlative degree of comparison only.
There were a king with a large jaw and a queen with a plain face, on the
throne of England; there were a king with a large jaw and a queen with
a fair face, on the throne of France. In both countries it was clearer
than crystal to the lords of the State preserves of loaves and fishes,
that things in general were settled for ever."""
blob=textblob.TextBlob(text)
blob.words
print(nltk.pos_tag(blob.words))
lowerwords=[x.lower() for x in blob.words]
from nltk.corpus import stopwords
print(stopwords.words('english'))
filtered=list(filter(lambda x: not x in stopwords.words('english'), lowerwords))
filtered=[x for x in lowerwords if not x in stopwords.words('english')]
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
lemmas=[wnl.lemmatize(x) for x in filtered]
from nltk.corpus import wordnet as wn
list(map(lambda x :wn.synsets(x), filtered))
wn.synset('car.n.01').lemma_names()
|
import numpy as np
import cv2
import pandas as pd
male = pd.read_csv("data/names/herrenavn.csv")
female = pd.read_csv("data/names/kvinnenavn.csv")
maleLength = len(male['Navn'])
feMaleLength = len(female['Navn'])
def draw_information(image_total, loc, faces_df, analyzis_object, useRandomNames=False):
currentDf = male if analyzis_object['gender'] == "Man" else female
currentLength = maleLength if analyzis_object['gender'] == "Man" else feMaleLength
randomInt = np.random.randint(1, currentLength)
if (useRandomNames):
score = 0
identity = currentDf.iloc[randomInt]['Navn']
else:
if(len(faces_df['identity']) > 0):
identity = faces_df.iloc[0]['identity'].split("/")[2]
score = faces_df.iloc[0]['VGG-Face_cosine']
else:
            # choose a random name (and a zero score) if no identity was found
            identity = currentDf.iloc[randomInt]['Navn']
            score = 0
x, y, width, _ = loc # x, y is the coordinate of the top left corner.
draw_rectangle_with_opacity(image_total, loc)
add_text(
image_total,
text=f"Name: {identity} ({score:.2f})",
org=(x + width + 10, y + 25)
)
add_text(
image_total,
text = f"Age: {analyzis_object['age']}",
org = (x + width + 10, y + 75),
)
add_text(
image_total,
text = f"Sex: {analyzis_object['gender']}",
org = (x + width + 10, y + 125),
)
add_text(
image_total,
text = f"Emotion: {analyzis_object['dominant_emotion']}",
org = (x + width + 10, y + 175),
)
add_text(
image_total,
text = f"Race: {analyzis_object['dominant_race']}",
org = (x + width + 10, y + 225),
)
def add_text(image_total, text, org):
cv2.putText(
img = image_total,
text = text,
org = org,
fontFace = cv2.FONT_HERSHEY_SIMPLEX,
fontScale = 0.75,
thickness = 2,
color = 0
)
def draw_rectangle_with_opacity(image_total, loc):
x, y, width, height = loc # x, y is the coordinate of the top left corner.
sub_img = image_total[y:y+height, x+width:x+width+300]
white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255
res = cv2.addWeighted(sub_img, 0.5, white_rect, 0.5, 1.0)
image_total[y:y+height, x+width:x+width+300] = res
def draw_bounding_box(image_total, loc, keypoints):
(x, y, width, height) = loc
cv2.rectangle(
img = image_total,
pt1 = (x, y),
pt2 = (x + width, y + height),
color = (0, 155, 255),
thickness = 2
)
cv2.circle(
img = image_total,
center = (keypoints['left_eye']),
radius = 2,
color = (0, 155, 255),
thickness = 2
)
cv2.circle(image_total, (keypoints['right_eye']), 2, (0, 155, 255), 2)
cv2.circle(image_total, (keypoints['nose']), 2, (0, 155, 255), 2)
cv2.circle(image_total, (keypoints['mouth_left']), 2, (0, 155, 255), 2)
cv2.circle(image_total, (keypoints['mouth_right']), 2, (0, 155, 255), 2)
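# Usage sketch (illustrative; assumes detections shaped like MTCNN/DeepFace output, i.e.
# loc = (x, y, w, h), keypoints with 'left_eye'/'right_eye'/'nose'/'mouth_left'/'mouth_right',
# and analyzis_object carrying 'gender', 'age', 'dominant_emotion' and 'dominant_race'):
#   draw_bounding_box(frame, loc, keypoints)
#   draw_information(frame, loc, faces_df, analyzis_object, useRandomNames=True)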
|
import unittest
from pprint import pprint
from random import randint
from pydsalg.datastruct.dynarray import DynamicArray
class TestDynamicArray(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.input0 = list(range(20))
def test_00(self):
dynarr = DynamicArray()
input0 = self.input0.copy()
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
print(dynarr._arr)
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
print(dynarr.remove())
for i in 'abcdefghijklmnopq':
dynarr.append(i)
print(dynarr._arr)
def test_01(self):
dynarr = DynamicArray(start=0)
input0 = self.input0.copy()
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
dynarr.append(input0.pop(0))
#print(dynarr._arr)
if __name__ == '__main__':
unittest.main()
|
import os
import git
from packit.api import PackitAPI
from subprocess import check_output
from flexmock import flexmock
from tests.testsuite_recording.integration.testbase import PackitUnittestOgr
class ProposeUpdate(PackitUnittestOgr):
def setUp(self):
super().setUp()
self.api = PackitAPI(
config=self.conf, package_config=self.pc, upstream_local_project=self.lp
)
self.api._up = self.upstream
self.api._dg = self.dg
# Do not upload package, because no credentials given in CI
flexmock(self.api).should_receive("_handle_sources").and_return(None)
flexmock(self.api.dg).should_receive("push").and_return(None)
flexmock(git.HEAD).should_receive("commit").and_return("hash-of-some-commit")
self.set_git_user()
def test_propose_update(self):
# change specfile little bit to have there some change
specfile_location = os.path.join(self.lp.working_dir, "python-ogr.spec")
with open(specfile_location, "r") as myfile:
filedata = myfile.read()
# Patch the specfile with new version
version_increase = "0.0.0"
for line in filedata.splitlines():
if "Version:" in line:
version = line.rsplit(" ", 1)[1]
v1, v2, v3 = version.split(".")
version_increase = ".".join([v1, str(int(v2) + 1), v3])
filedata = filedata.replace(version, version_increase)
break
with open(specfile_location, "w") as myfile:
myfile.write(filedata)
check_output(
f"cd {self.lp.working_dir};"
f"git commit -m 'test change' python-ogr.spec;"
f"git tag -a {version_increase} -m 'my version {version_increase}'",
shell=True,
)
self.api.sync_release("master")
|
from hammer_tools.material_library.db import connect
from hammer_tools.material_library.text import alphaNumericTokens
class MapType(object):
Unknown = 'unknown'
Thumbnail = 'thumb'
Diffuse = 'diff'
Roughness = 'rough'
Glossiness = 'gloss'
Metalness = 'metal'
Reflection = 'refl'
Refraction = 'refr'
Normal = 'normal'
Bump = 'bump'
Subsurface = 'sss'
Opacity = 'opacity'
Emission = 'emission'
Displacement = 'disp'
AmbientOcclusion = 'ao'
__labels = None
@staticmethod
def allTypes():
return (
MapType.Unknown,
MapType.Thumbnail,
MapType.Diffuse,
MapType.Roughness,
MapType.Glossiness,
MapType.Metalness,
MapType.Reflection,
MapType.Refraction,
MapType.Normal,
MapType.Bump,
MapType.Subsurface,
MapType.Opacity,
MapType.Emission,
MapType.Displacement,
MapType.AmbientOcclusion
)
@staticmethod
def typeName(map_type):
return {
MapType.Unknown: 'Unknown',
MapType.Thumbnail: 'Thumbnail',
MapType.Diffuse: 'Diffuse',
MapType.Roughness: 'Roughness',
MapType.Glossiness: 'Glossiness',
MapType.Metalness: 'Metalness',
MapType.Reflection: 'Reflection',
MapType.Refraction: 'Refraction',
MapType.Normal: 'Normal',
MapType.Bump: 'Bump',
MapType.Subsurface: 'Subsurface',
MapType.Opacity: 'Opacity',
MapType.Emission: 'Emission',
MapType.Displacement: 'Displacement',
MapType.AmbientOcclusion: 'Ambient Occlusion'
}[map_type]
@staticmethod
def allLabels(reload=False):
if not reload and MapType.__labels is not None:
return MapType.__labels
with connect() as connection:
cursor = connection.execute('SELECT * FROM map_types_labels')
labels = {map_type: [] for map_type in MapType.allTypes()}
for data in cursor.fetchall():
map_type = data['map_type']
label = data['label']
labels[map_type].append(label)
MapType.__labels = {map_type: tuple(labels) for map_type, labels in labels.items()}
return MapType.__labels
@staticmethod
def labels(map_type, reload=False):
if not reload and MapType.__labels is not None and map_type in MapType.__labels:
return MapType.__labels[map_type]
with connect() as connection:
cursor = connection.execute('SELECT label FROM map_types_labels WHERE map_type = :map_type',
{'map_type': map_type})
labels = tuple(row['label'] for row in cursor.fetchall())
if MapType.__labels:
MapType.__labels[map_type] = labels
return labels
@staticmethod
def mapType(name):
name_tokens = alphaNumericTokens(name.lower())[::-1]
found_pos = float('+inf')
found_type = None
for map_type, tags in MapType.allLabels().items():
for tag in tags:
if tag in name_tokens:
pos = name_tokens.index(tag)
if pos > found_pos:
continue
elif pos == found_pos:
raise AssertionError('Found intersections between tags in different map types.')
found_pos = pos
found_type = map_type
break
return found_type or MapType.Unknown
DEFAULT_MAP_TYPES_LABELS = {
MapType.Unknown: (),
MapType.Thumbnail: ('thumbnail', 'thumb', 'preview'),
MapType.Diffuse: ('diffuse', 'diff', 'albedo', 'basecolor', 'color'),
MapType.Roughness: ('roughness', 'rough'),
MapType.Glossiness: ('glossiness', 'gloss'),
MapType.Metalness: ('metalness', 'metallic', 'metal'),
MapType.Reflection: ('reflection', 'refl', 'specular', 'spec'),
MapType.Refraction: ('refraction', 'refr', 'transparency'),
MapType.Normal: ('normal', 'norm'),
MapType.Bump: ('bump',),
MapType.Subsurface: ('subsurface', 'sss'),
MapType.Opacity: ('opacity', 'alpha', 'cutout'),
MapType.Emission: ('emission', 'emissive'),
MapType.Displacement: ('displacement', 'disp', 'height'),
MapType.AmbientOcclusion: ('ambientocclusion', 'ambient', 'occlusion', 'ao')
}
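# Illustrative lookup (assumes the map_types_labels table was seeded with
# DEFAULT_MAP_TYPES_LABELS and that alphaNumericTokens splits the name on underscores):
# MapType.mapType('oak_floor_02_albedo_4k.exr') resolves to MapType.Diffuse and
# MapType.mapType('oak_floor_02_rough_4k.exr') to MapType.Roughness, because the
# label closest to the end of the tokenized name wins.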
|
"""This module proposes Pytorch style LightningModule classes for the gnn use-case."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.utilities.cli import MODEL_REGISTRY
import torch
import torch.nn as nn
import torch_geometric as pyg
import torch_optimizer as optim
import torchmetrics.functional as tmf
from typing import List, Tuple
import plotters
class CombustionModule(pl.LightningModule):
"""
Contains the basic logic meant for all GNN-like models experiments.
Loss is MSE and the metric of interest is R2 determination score.
"""
def __init__(self):
"""Init the CombustionModule class."""
super().__init__()
self.grid_shape = None
def forward(self, x_val: torch.Tensor, edge_index: torch.Tensor) -> torch.Tensor:
"""Compute the forward pass.
Args:
x_val (torch.Tensor): Nodes features.
edge_index (torch.Tensor): Connectivity matrix.
Returns:
(torch.Tensor): Resulting model forward pass.
"""
return self.model(x_val, edge_index)
def _common_step(self,
batch: torch.Tensor,
batch_idx: int,
stage: str) -> List[torch.Tensor]:
"""Define the common operations performed on data."""
        batch_size = batch.ptr.size(0) - 1  # number of graphs in the batch
y_hat = self(batch.x, batch.edge_index)
loss = tmf.mean_squared_error(y_hat, batch.y)
r2 = tmf.r2_score(y_hat, batch.y)
self.log(f"{stage}_loss", loss, prog_bar=True, on_step=True, batch_size=batch_size)
self.log(f"{stage}_r2", r2, on_step=True, batch_size=batch_size)
return y_hat, loss, r2
def training_step(self, batch: torch.Tensor, batch_idx: int) -> torch.Tensor:
"""Compute one training step.
Args:
batch (torch.Tensor): Batch containing nodes features and connectivity matrix.
batch_idx (int): Batch index.
Returns:
(torch.Tensor): Loss.
"""
_, loss, _ = self._common_step(batch, batch_idx, "train")
return loss
def validation_step(self, batch: torch.Tensor, batch_idx: int) -> None:
"""Compute one validation step.
Args:
batch (torch.Tensor): Batch containing nodes features and connectivity matrix.
batch_idx (int): Batch index.
"""
y_hat, _, _ = self._common_step(batch, batch_idx, "val")
    def test_step(self, batch: torch.Tensor, batch_idx: int) -> Tuple[torch.Tensor]:
        """Compute one testing step. Additionally, generates the outputs needed to plot the test
        Dataset.
Args:
batch (torch.Tensor): Batch containing nodes features and connectivity matrix.
batch_idx (int): Batch index.
Returns:
(Tuple[torch.Tensor]): (Ground truth, Predictions)
"""
y_hat, _, _ = self._common_step(batch, batch_idx, "test")
pos = np.stack(batch.pos.cpu().numpy())
x_max = np.max(pos[:, 0:1])
y_max = np.max(pos[:, 1:2])
z_max = np.max(pos[:, 2:3])
if not self.grid_shape:
self.grid_shape = (x_max + 1, y_max + 1, z_max + 1)
return batch.y, y_hat
def test_epoch_end(self, outputs: list) -> None:
"""Gather all the outputs from the test_step to plot the test Dataset.
Args:
outputs (List[Tuple[torch.Tensor]]): all batches containing a pair of
(ground truth, prediction)
"""
ys = list()
y_hats = list()
for out in outputs:
ys.append(out[0])
y_hats.append(out[1])
self.ys = np.asarray([t.cpu().numpy().reshape((-1,) + self.grid_shape) for t in ys])
self.y_hats = np.asarray([t.cpu().numpy().reshape((-1,) + self.grid_shape) for t in y_hats])
self.plotter = plotters.Plotter(self.model.__class__.__name__, self.grid_shape)
self.plotter.cross_section(self.plotter.zslice, self.ys, self.y_hats)
self.plotter.dispersion_plot(self.ys, self.y_hats)
self.plotter.histo(self.ys, self.y_hats)
self.plotter.histo2d(self.ys, self.y_hats)
self.plotter.boxplot(self.ys, self.y_hats)
def configure_optimizers(self) -> optim.Optimizer:
"""Set the model optimizer.
Returns:
(torch_optimizer.Optimizer): Optimizer
"""
return optim.AdamP(self.parameters(), lr=self.lr)
@MODEL_REGISTRY
class LitGAT(CombustionModule):
"""Graph-ATtention net as described in the “Graph Attention Networks” paper."""
def __init__(self,
in_channels: int,
hidden_channels: int,
out_channels: int,
num_layers: int,
dropout: float,
heads: int,
jk: str,
lr: float) -> None:
"""Init the LitGAT class."""
super().__init__()
self.save_hyperparameters()
self.lr = lr
self.model = pyg.nn.GAT(in_channels=in_channels,
hidden_channels=hidden_channels,
out_channels=out_channels,
num_layers=num_layers,
dropout=dropout,
act=nn.SiLU(inplace=True),
heads=heads,
jk=jk)
@MODEL_REGISTRY
class LitGCN(CombustionModule):
"""Classic stack of GCN layers.
“Semi-supervised Classification with Graph Convolutional Networks”.
"""
def __init__(self,
in_channels: int,
hidden_channels: int,
out_channels: int,
num_layers: int,
dropout: float,
jk: str,
lr: float) -> None:
"""Init the LitGCN."""
super().__init__()
self.save_hyperparameters()
self.lr = lr
self.model = pyg.nn.GCN(in_channels=in_channels,
hidden_channels=hidden_channels,
out_channels=out_channels,
num_layers=num_layers,
dropout=dropout,
jk=jk,
act=nn.SiLU(inplace=True))
@MODEL_REGISTRY
class LitGraphUNet(CombustionModule):
"""Graph-Unet as described in “Graph U-Nets”."""
def __init__(self,
in_channels: int,
hidden_channels: int,
out_channels: int,
depth: int,
pool_ratios: float,
lr: float) -> None:
"""Init the LitGraphUNet class."""
super().__init__()
self.save_hyperparameters()
self.lr = lr
self.model = pyg.nn.GraphUNet(in_channels=in_channels,
hidden_channels=hidden_channels,
out_channels=out_channels,
depth=depth,
pool_ratios=pool_ratios,
act=nn.SiLU(inplace=True))
@MODEL_REGISTRY
class LitGIN(CombustionModule):
"""GNN implementation of “How Powerful are Graph Neural Networks?”."""
def __init__(self,
in_channels: int,
hidden_channels: int,
out_channels: int,
num_layers: int,
dropout: float,
lr: float) -> None:
"""Init the LitGIN class."""
super().__init__()
self.save_hyperparameters()
self.lr = lr
self.model = pyg.nn.GIN(in_channels=in_channels,
hidden_channels=hidden_channels,
out_channels=out_channels,
num_layers=num_layers,
dropout=dropout,
act=nn.SiLU(inplace=True))
|
import numpy as np
from tensorflow.keras.utils import to_categorical
from sklearn.utils import class_weight
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2 as cv
import os
import time
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
image_height=90
image_width=90
#encode expression to a unique number for classification
def expression_to_label(expression):
if expression == 'Neutral':
return 0
if expression == 'Happiness':
return 1
if expression == 'Fear':
return 2
if expression == 'Surprise':
return 3
if expression == 'Sadness':
return 4
#load images and labels
path='sorted_training_5'
x_train=[]
y_train=[]
count=0
for expression in os.listdir(path):
for image in os.listdir(os.path.join(path,expression)):
img=cv.imread(os.path.join(path,expression,image),1)
img=cv.resize(img,(image_height,image_width))
x_train.append(img)
y_train.append(expression_to_label(expression))
count=count+1
print("Loaded: ", count)
x_test=[]
y_test=[]
path1='sorted_validation_5'
for expression in os.listdir(path1):
for image in os.listdir(os.path.join(path1,expression)):
img=cv.imread(os.path.join(path1,expression,image),1)
img=cv.resize(img,(image_height,image_width))
x_test.append(img)
y_test.append(expression_to_label(expression))
count=count+1
print("Loaded: ", count)
x_train=np.array(x_train)
y_train=np.array(y_train)
x_test=np.array(x_test)
y_test=np.array(y_test)
#save in .npy format for future use
#np.save('x_train_5.npy',x_train)
#np.save('y_train_5.npy',y_train)
#np.save('x_test_5.npy',x_test)
#np.save('y_test_5.npy',y_test)
#load the data
# x_train=np.load('x_train_5.npy', allow_pickle=True)
# y_train=np.load('y_train_5.npy',allow_pickle=True)
# x_test=np.load('x_test_5.npy', allow_pickle=True)
# y_test=np.load('y_test_5.npy',allow_pickle=True)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
#shuffle the data
x_train,y_train=shuffle(x_train, y_train, random_state=0)
x_test, y_test=shuffle(x_test, y_test, random_state=0)
print(y_train)
#normalize our pixel values
x_train=x_train/255
x_test=x_test/255
#convert labels to one hot encodings
y_train=to_categorical(y_train, num_classes=5)
y_test=to_categorical(y_test, num_classes=5)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
#data augmentation to avoid overfitting
datagen=ImageDataGenerator(
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
zoom_range=0.15,
fill_mode="nearest")
#load pre-trained inceptionV3 model with imagenet weights
inception=tf.keras.applications.InceptionV3(
include_top=False,
weights="imagenet",
input_tensor=None,
input_shape=(image_height, image_width,3),
pooling='avg',
classes=5,
)
#freeze the inception model so its weights don't get updated
inception.trainable = False
#freeze some layer in inception model
#for layer in model.layers[:249]:
# layer.trainable = False
#define own dense layers for classification of 5 classes
x=inception.output
x=tf.keras.layers.Dense(1024,activation='relu')(x)
x=tf.keras.layers.Dense(512,activation='relu')(x)
prediction=tf.keras.layers.Dense(5,activation='softmax')(x)
#define our model
model=tf.keras.Model(inputs=inception.input, outputs=prediction)
#print summary
print(model.summary())
print(len(model.layers))
#compile the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#fit the model
#history=model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test), batch_size=256)
history=model.fit_generator(datagen.flow(x_train, y_train, batch_size=256), epochs=100 , validation_data=(x_test, y_test))
#save final model
model.save('final_model.h5')
#plot accuracy and val_accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('acc')
plt.xlabel('epochs')
plt.legend(('acc','val_acc'), loc='upper left')
plt.show()
#plot loss and val_loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend(('loss','val_loss'), loc='upper left')
plt.show()
print("Max validation accuracy: ", max(history.history['val_accuracy']))
print("Max Training Accuracy: ", max(history.history['accuracy']))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Log server
import socket
import threading
import sys
import time
from datetime import date
HOST = '' # Symbolic name meaning the local host
PORT = 50007 # Arbitrary non-privileged port
if len(sys.argv) > 2:
print "usage : %s [port]" % (sys.argv[0])
sys.exit(-2)
elif len(sys.argv) == 2:
# The user has specified a port number, try to use it.
try:
PORT = int(sys.argv[1])
except:
print "Invalid port no. %s specified" % (sys.argv[1])
sys.exit(-1)
# Check if the port falls in the valid range.
if PORT > 65535 or PORT < 0:
print " port number cannot exceed 65535 or lesser than 0"
sys.exit(-1)
print "Trying to use port ", PORT
else:
print " Port not specified Using default port " , PORT
class Worker(threading.Thread):
def __init__(self, conn, address):
threading.Thread.__init__(self)
self.conn = conn
self.address = address
self.filename = time.strftime("%d-%b-%Y-%H-%M-%S.log",time.localtime())
self.file = open(self.filename,'w')
self.runFlag = 1
def run(self):
print 'Log filename is ',self.filename
while self.runFlag:
data = self.conn.recv(1024)
if not data: break
self.file.write(data)
#self.file.write('\n')
self.file.flush()
if len(data):
print data
#print "\n" #new line.
self.conn.close()
self.file.close()
print "Connection closed :" , self.address
# --------------------- main() ----------------------
try:
# create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
# bind to the indicated port.
s.bind((HOST, PORT))
s.listen(5)
workers = []
while 1:
print "Waiting for connections on Port %d ....." % PORT
conn, addr = s.accept()
print 'Connected by', addr
worker = Worker(conn,addr)
worker.start()
workers.append(worker)
except :
print 'caught an exception, exiting...'
print "Exception Details:", sys.exc_info()[0], sys.exc_info() [1]
sys.exit(-2)
|
from setuptools import setup
from os import path
import re
def read(fname):
return open(path.join(path.dirname(__file__), fname)).read()
def get_version():
with open('CHANGELOG.rst') as changelog:
for line in changelog:
if re.match(r'^\d+\.\d+\.\d+$', line):
                return line.strip()
setup(
name='py-toolbox',
version=get_version(),
author='Daniel Grießhaber',
author_email='[email protected]',
url='https://github.com/dangrie158/py-toolbox',
packages=['pytb', 'pytb.test'],
include_package_data=True,
license='MIT',
description='A collection of commonly used python snippets',
long_description=read('README.rst'),
    long_description_content_type='text/x-rst',
classifiers=[
'Development Status :: 1 - Planning',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
],
entry_points = {
'console_scripts': ['pytb=pytb.__main__:main'],
}
)
|
"""
Astra API client
```
from astra import Astra
```
"""
from .client import Astra
__all__ = ["Astra"]
|
import unittest
class TestDummy(unittest.TestCase):
def test_import(self):
pass
def test_failure(self):
        self.assertTrue(False)
|
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
# import os
# import pandas as pd
import glob
import seaborn as sns
from tqdm import tqdm
import matplotlib as mpl
# import warnings; warnings.filterwarnings(action='once')
large = 22
med = 16
small = 12
params = {'legend.fontsize': med,
'figure.figsize': (16, 10),
'axes.labelsize': med,
'axes.titlesize': med,
'xtick.labelsize': med,
'ytick.labelsize': med,
'figure.titlesize': large}
plt.rcParams.update(params)
plt.style.use('seaborn-whitegrid')
sns.set_style("white")
# %matplotlib inline
# Version
print(mpl.__version__) # > 3.0.0
print(sns.__version__) # > 0.9.0
def estimateStandardDeviation(image):
"""
Estimate the standard deviation of the image.
Parameters
----------
image : ndarray
Image to estimate the standard deviation.
Returns
-------
float
The standard deviation of the image.
"""
width, height = image.shape
operator = laplaceElement()
return np.sqrt(math.pi / 2) * 1 / (6 * (width-2) * (height-2)) * np.sum(np.abs(cv2.filter2D(image, -1, operator)))
def laplaceElement():
"""
Create a Laplace filter element.
Returns
-------
ndarray
Laplace filter element.
"""
    L1 = np.array([[0, 1, 0],
                   [1, -4, 1],
                   [0, 1, 0]], dtype=float)
    L2 = np.array([[1, 0, 1],
                   [0, -4, 0],
                   [1, 0, 1]], dtype=float)
return L2 - 2*L1
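# Sanity-check sketch (assumption: the estimator above is Immerkaer's fast noise
# estimate, so it should roughly recover the sigma of synthetic Gaussian noise
# added to a flat image).
def noiseEstimateSanityCheck(sigma=10.0, size=256):
    flat = np.full((size, size), 128.0)
    noisy = flat + np.random.normal(0.0, sigma, flat.shape)
    return estimateStandardDeviation(noisy)  # expected to be close to sigma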
def check_noise(file_path, thresh=0.1):
"""
Check if the file is noise or not.
Parameters
----------
file_path : str
Path to the file.
Returns
-------
bool
True if the file is noise, False if not.
"""
img = cv2.imread(file_path, 0)
sigma_n = estimateStandardDeviation(img)
if sigma_n < thresh:
return False
else:
return True
def readImagefromFolder(folder="/home/nguyentansy/PhD-work/PhD-project/2021/src/Pre-processing/Isotropic/data/labeled-images/"):
sigma_list1 = []
sigma_list2 = []
for filename in tqdm(glob.glob("%s/*/*/*/*" % folder)):
img = cv2.imread(filename, 0)
sigma_n = estimateStandardDeviation(img)
sigma_list1.append(sigma_n)
for filename in tqdm(glob.glob("%s/*/pathological-findings/*/*" % folder)):
img = cv2.imread(filename, 0)
sigma_n = estimateStandardDeviation(img)
sigma_list2.append(sigma_n)
return sigma_list1, sigma_list2
def plotHistogram(arr, arr2):
"""
Plot the histogram of the array.
Parameters
----------
arr : ndarray
Array to plot the histogram.
"""
# plt.hist(arr.ravel(), 256, [0, 256])
# plt.show()
# Draw Plot
plt.figure(figsize=(13, 10), dpi=80)
sns.histplot(arr, color="g", label="labeled images")
sns.histplot(arr2, label="pathological findings", color="orange")
# plt.ylim(0, 0.35)
plt.xticks(np.arange(0, 2, 0.05), rotation=45)
# Decoration
plt.title('Noise standard deviation analysis', fontsize=22)
plt.legend()
filesave = "/home/nguyentansy/PhD-work/PhD-project/2021/src/Pre-processing/Isotropic/Noise/src/denoising_rgb/results/sigmahist.png"
plt.savefig(filesave)
if __name__ == "__main__":
sigma_list1, sigma_list2 = readImagefromFolder()
plotHistogram(sigma_list1, sigma_list2)
|
from parameterized import parameterized_class
from zkay.examples.examples import all_examples
from zkay.tests.utils.test_examples import TestExamples
from zkay.zkay_ast.build_ast import build_ast
from zkay.zkay_ast.visitor.deep_copy import deep_copy
@parameterized_class(('name', 'example'), all_examples)
class TestParentSetter(TestExamples):
def test_deep_copy(self):
ast = build_ast(self.example.code())
ast_2 = deep_copy(ast)
self.assertEqual(str(ast), str(ast_2))
|
import komand
from .schema import QueryInput, QueryOutput
# Custom imports below
import json
import ldap3
class Query(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='query',
description='Run a LDAP query',
input=QueryInput(),
output=QueryOutput())
def run(self, params={}):
conn = self.connection.conn
conn.search(search_base=params.get('search_base'),
search_filter=params.get('search_filter'),
attributes=[ldap3.ALL_ATTRIBUTES, ldap3.ALL_OPERATIONAL_ATTRIBUTES]
)
result_list_json = conn.response_to_json()
result_list_object = json.loads(result_list_json)
entries = result_list_object["entries"]
return {'results': entries}
|
# Generated with AdditionalFileFormatCode
#
from enum import Enum
from enum import auto
class AdditionalFileFormatCode(Enum):
""""""
BINARY_OUTPUT = auto()
ASCII_OUTPUT = auto()
def label(self):
if self == AdditionalFileFormatCode.BINARY_OUTPUT:
return "Binary format"
if self == AdditionalFileFormatCode.ASCII_OUTPUT:
return "ASCII format" |
# -*- coding: utf-8 -*-
"""Module with SecureCRT parser."""
from os.path import expanduser
class SecureCRTConfigParser(object):
"""SecureCRT xml parser."""
meta_sessions = ['Default']
def __init__(self, xml):
"""Construct parser instance."""
self.xml = xml
self.tree = {}
def parse_hosts(self):
"""Parse SecureCRT Sessions."""
sessions = self.get_element_by_name(
self.xml.getchildren(), 'Sessions'
).getchildren()
self.parse_sessions(sessions, self.tree)
return self.tree
def parse_sessions(self, sessions, parent_node):
"""Parse SecureCRT sessions."""
for session in sessions:
if session.get('name') not in self.meta_sessions:
if not self.is_session_group(session):
host = self.make_host(session)
if not host:
continue
parent_node[host['label']] = host
else:
parent_node[session.get('name')] = {'__group': True}
self.parse_sessions(
session.getchildren(),
parent_node[session.get('name')]
)
def is_session_group(self, session):
"""Check node element type"""
return self.get_element_by_name(
session.getchildren(), 'Hostname'
) is None
def parse_identity(self):
"""Parse SecureCRT SSH2 raw key."""
identity = self.get_element_by_name(
self.xml.getchildren(), 'SSH2'
)
if identity is None:
return None
identity_filename = self.get_element_by_name(
identity.getchildren(),
'Identity Filename V2'
)
if not self.check_attribute(identity_filename):
return None
path = identity_filename.text.split('/')
public_key_name = path[-1].split('::')[0]
private_key_name = public_key_name.split('.')[0]
if path[0].startswith('$'):
path.pop(0)
path.insert(0, expanduser("~"))
path[-1] = public_key_name
public_key_path = '/'.join(path)
path[-1] = private_key_name
private_key_path = '/'.join(path)
return private_key_path, public_key_path
def make_host(self, session):
"""Adapt SecureCRT Session to Termius host."""
session_attrs = session.getchildren()
hostname = self.get_element_by_name(session_attrs, 'Hostname')
port = self.get_element_by_name(session_attrs, '[SSH2] Port')
username = self.get_element_by_name(session_attrs, 'Username')
if not self.check_attribute(hostname):
return None
return {
'label': session.get('name'),
'hostname': hostname.text,
'port': port.text if self.check_attribute(port) else '22',
'username': username.text
if self.check_attribute(username) else None
}
def check_attribute(self, attr):
"""Check an attribute."""
return attr is not None and attr.text
def get_element_by_name(self, elements, name):
"""Get SecureCRT config block."""
for element in elements:
if element.get('name') == name:
return element
return None
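# Usage sketch (illustrative; assumes `config.xml` is a SecureCRT XML export and an
# element API that offers get()/getchildren(), e.g. lxml.etree or older ElementTree):
#   from lxml import etree
#   root = etree.parse('config.xml').getroot()
#   parser = SecureCRTConfigParser(root)
#   hosts_tree = parser.parse_hosts()
#   identity = parser.parse_identity()  # (private_key_path, public_key_path) or None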
|
import os
import click
import configobj
class configobj_provider:
"""
A parser for configobj configuration files
Parameters
----------
unrepr : bool
Controls whether the file should be parsed using configobj's unrepr
mode. Defaults to `True`.
section : str
If this is set to something other than the default of `None`, the
provider will look for a corresponding section inside the
configuration file and return only the values from that section.
"""
def __init__(self, unrepr=True, section=None):
self.unrepr = unrepr
        self.section = section
    def __call__(self, file_path, cmd_name):
        """
        Parse and return the configuration parameters.
        Parameters
        ----------
        file_path : str
            The path to the configuration file
        cmd_name : str
            The name of the click command
        Returns
        -------
        dict
            A dictionary containing the configuration parameters.
        """
config = configobj.ConfigObj(file_path, unrepr=self.unrepr)
if self.section:
config = config[self.section].dict()
return config
def configuration_option(*param_decls, **attrs):
"""
Adds configuration file support to a click application.
This will create an option of type `click.File` containing the path to a
configuration file. It will overwrite the default values for all other
click arguments or options with the corresponding value from the
configuration file.
The default name of the option is `--config`.
By default the configuration is read from a file with the name `config`
inside the configuration directory as determined by `click.get_app_dir`.
This decorator accepts the same arguments as `click.option` and
`click.Path`. In addition, the following keyword arguments are available:
cmd_name : str
The command name. This is used to determine the configuration
directory. Defaults to `ctx.info_name`
config_file_name : str
The name of the configuration file. Defaults to `config`
provider : callable
A callable that parses the configuration file and returns a dictionary
of the configuration parameters. Will be called as
`provider(file_path, cmd_name)`. Default: `configobj_provider()`
"""
def decorator(f):
def callback(ctx, param, value):
nonlocal cmd_name, config_file_name, saved_callback, provider
if not ctx.default_map:
ctx.default_map = {}
if not cmd_name:
cmd_name = ctx.info_name
default_value = os.path.join(
click.get_app_dir(cmd_name), config_file_name)
param.default = default_value
if not value:
value = default_value
try:
config = provider(value, cmd_name)
except Exception as e:
raise click.BadOptionUsage(
"Error reading configuration file: {}".format(e), ctx)
ctx.default_map.update(config)
return saved_callback(ctx, param,
value) if saved_callback else value
attrs.setdefault('is_eager', True)
attrs.setdefault('help', 'Read configuration from PATH.')
attrs.setdefault('expose_value', False)
cmd_name = attrs.pop('cmd_name', None)
config_file_name = attrs.pop('config_file_name', 'config')
provider = attrs.pop('provider', configobj_provider())
path_default_params = {
'exists': False,
'file_okay': True,
'dir_okay': False,
'writable': False,
'readable': True,
'resolve_path': False
}
path_params = {
k: attrs.pop(k, v)
for k, v in path_default_params.items()
}
attrs['type'] = click.Path(**path_params)
saved_callback = attrs.pop('callback', None)
attrs['callback'] = callback
return click.option(*(param_decls or ('--config', )), **attrs)(f)
return decorator
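# Usage sketch (hypothetical command; assumes a configobj file whose top-level keys match
# the option names, e.g. a line `name = "world"`):
#   @click.command()
#   @configuration_option()
#   @click.option('--name', default='default')
#   def greet(name):
#       click.echo(name)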
|
# -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : notification.py
@create : 2020/11/17 22:11
"""
import threading
import time
from django.conf import settings
from redis import StrictRedis
from monitor.models import OnlineUsers
def online_user_notifications():
"""
    Online users: handle the Redis keyspace notification fired when a user's key expires.
:return: None
"""
conn = StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=3, password=settings.REDIS_PWD)
pub_sub = conn.pubsub()
def user_offline(msg):
online_user_key = msg.get('data').decode()
user_list = str(online_user_key).split('_')
OnlineUsers.objects.filter(user=user_list[2], ip=user_list[3]).delete()
pub_sub.psubscribe(**{'__keyevent@3__:expired': user_offline})
while 1:
pub_sub.get_message()
time.sleep(0.2)
t = threading.Thread(target=online_user_notifications)
t.daemon = True
t.start()
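# Deployment note (assumption): Redis only publishes __keyevent@3__:expired messages when
# keyspace notifications are enabled, e.g. via `CONFIG SET notify-keyspace-events Ex` or
# `notify-keyspace-events "Ex"` in redis.conf.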
|
"""
Sphinx inplace translation.
Please put `xxx_zh_CN.rst` alongside `xxx.rst`. When language is set to `zh_CN`,
`xxx_zh_CN.rst` will be used in place of `xxx.rst`.
If a translation does not exist, it will automatically fall back to the original file, without warning.
I write this based on the example of:
https://github.com/readthedocs/sphinxcontrib-multisrc/blob/master/sphinxcontrib/multisrc.py
"""
import os
import types
def builder_inited(app):
"""Event listener to set up multiple environments."""
patch_doc2path(app.env, app.config.language)
def patch_doc2path(env, language):
# patch doc2path so that it resolves to the correct language.
override_doc2path = env.doc2path
def doc2path(env, docname: str, base: bool = True):
path = override_doc2path(docname, base)
if language not in (None, 'en'):
# The language is set to another one
new_docname = f'{docname}_{language}'
new_path = override_doc2path(new_docname, base)
if os.path.exists(new_path):
return new_path
return path
env.doc2path = types.MethodType(doc2path, env)
def setup(app):
app.connect('builder-inited', builder_inited)
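# Minimal conf.py sketch (assumption: this module is importable by Sphinx, e.g. saved as
# _ext/inplace_translation.py with _ext added to sys.path):
#   extensions = ['inplace_translation']
#   language = 'zh_CN'   # index_zh_CN.rst then shadows index.rst wherever it exists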
|
#%%
from __future__ import print_function
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.optimizer import Optimizer
from torchvision import datasets, transforms
from torch.autograd import Variable
from tqdm import tqdm
# import Simulated annealing optimizer
from sa import UniformSampler, GaussianSampler
#%%
# Training settings
# parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
# parser.add_argument('--batch-size', type=int, default=64, metavar='N',
# help='input batch size for training (default: 64)')
# parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
# help='input batch size for testing (default: 1000)')
# parser.add_argument('--epochs', type=int, default=10, metavar='N',
# help='number of epochs to train (default: 10)')
# parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
# help='learning rate (default: 0.01)')
# parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
# help='SGD momentum (default: 0.5)')
# parser.add_argument('--no-cuda', action='store_true', default=False,
# help='disables CUDA training')
# parser.add_argument('--seed', type=int, default=1, metavar='S',
# help='random seed (default: 1)')
# parser.add_argument('--log-interval', type=int, default=10, metavar='N',
# help='how many batches to wait before logging training status')
# args = parser.parse_args()
args = {'batch_size': 64,
'test_batch_size': 1000,
'epochs': 10,
'lr': 0.01,
'momentum': 0.9,
'no_cuda': False,
'seed': 1,
'log_interval': 10}
#%%
args['cuda'] = not args['no_cuda'] and torch.cuda.is_available()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
torch.manual_seed(args['seed'])
if args['cuda']:
torch.cuda.manual_seed(args['seed'])
#%%
kwargs = {'num_workers': 1, 'pin_memory': True} if args['cuda'] else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args['batch_size'], shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args['test_batch_size'], shuffle=True, **kwargs)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
model = Net()
if args['cuda']:
model.cuda()
class SimulatedAnnealing(Optimizer):
def __init__(self, params, sampler, tau0=1.0, anneal_rate=0.0003,
min_temp=1e-5, anneal_every=100000, hard=False, hard_rate=0.9, decay_rate=0.9):
defaults = dict(sampler=sampler, tau0=tau0, tau=tau0, anneal_rate=anneal_rate,
min_temp=min_temp, anneal_every=anneal_every,
hard=hard, hard_rate=hard_rate,
decay_rate = decay_rate,
iteration=0)
super(SimulatedAnnealing, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
if closure is None:
raise Exception("loss closure is required to do SA")
loss = closure()
for group in self.param_groups:
# the sampler samples randomness
# that is used in optimizations
sampler = group['sampler']
# clone all of the params to keep in case we need to swap back
cloned_params = [p.clone() for p in group['params']]
for p in group['params']:
# anneal tau if it matches the requirements
if group['iteration'] > 0 \
and group['iteration'] % group['anneal_every'] == 0:
if not group['hard']:
# smoother annealing: consider using this over hard annealing
rate = -group['anneal_rate'] * group['iteration']
group['tau'] = np.maximum(group['tau0'] * np.exp(rate),
group['min_temp'])
else:
# hard annealing
group['tau'] = np.maximum(group['hard_rate'] * group['tau'],
group['min_temp'])
decay_rate = np.exp(-group['decay_rate'])
random_perturbation = decay_rate*group['sampler'].sample(p.data.size())
p.data = p.data / torch.norm(p.data)
p.data.add_(random_perturbation)
group['iteration'] += 1
# re-evaluate the loss function with the perturbed params
# if we didn't accept the new params swap back and return
loss_perturbed = closure(weight=self.param_groups)
final_loss, is_swapped = self.anneal(loss, loss_perturbed, group['tau'])
if is_swapped:
for p, pbkp in zip(group['params'], cloned_params):
p.data = pbkp.data
return final_loss
def anneal(self, loss, loss_perturbed, tau):
'''returns loss, is_new_loss'''
def acceptance_prob(old, new, temp):
return torch.exp((old - new)/temp)
# print(loss_perturbed.shape)
if loss_perturbed.item() < loss.item():
return loss_perturbed, True
else:
# evaluate the metropolis criterion
ap = acceptance_prob(loss, loss_perturbed, tau)
print("old = ", loss.item(), "| pert = ", loss_perturbed.item(),
" | ap = ", ap.item(), " | tau = ", tau)
if ap.item() > np.random.rand():
return loss_perturbed, True
# return the original loss if above fails
# or if the temp is now annealed
return loss, False
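# Worked Metropolis example (illustrative): with an old loss of 0.50, a perturbed loss of
# 0.55 and tau = 0.1, the acceptance probability is exp((0.50 - 0.55) / 0.1) ~= 0.61, so
# the worse parameters are still accepted roughly 61% of the time at that temperature.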
# sampler = UniformSampler(minval=-0.5, maxval=0.5, cuda=args['cuda'])
sampler = GaussianSampler(mu=0, sigma=1, cuda=args['cuda'])
optimizer = SimulatedAnnealing(model.parameters(), sampler=sampler)
#%%
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if args['cuda']:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
def closure(weight=None):
if torch.is_grad_enabled():
optimizer.zero_grad()
if (weight is not None):
state_dict = model.state_dict()
keys = list(state_dict.keys())
for key_idx, key in enumerate(keys):
# print(state_dict[key])
# print(weight_new[0]['params'])
state_dict[key] = weight[0]['params'][key_idx]
model.load_state_dict(state_dict)
outputs = model(data)
loss = F.nll_loss(outputs, target)
if loss.requires_grad:
loss.backward(retain_graph=True)
return loss
loss = optimizer.step(closure)
if batch_idx % args['log_interval'] == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args['cuda']:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
for epoch in range(1, args['epochs'] + 1):
train(epoch)
test()
# %%
|
#!/usr/bin/env python
##Copyright 2008-2013 Jelle Feringa ([email protected])
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>
from OCC.Core.TopoDS import TopoDS_Wire
from OCCUtils.base import BaseObject
class Wire(TopoDS_Wire, BaseObject):
def __init__(self, wire):
'''
'''
assert isinstance(wire, TopoDS_Wire), 'need a TopoDS_Wire, got a %s' % wire.__class__
assert not wire.IsNull()
super(Wire, self).__init__()
BaseObject.__init__(self, 'wire')
# we need to copy the base shape using the following three
# lines
assert self.IsNull()
self.TShape(wire.TShape())
self.Location(wire.Location())
self.Orientation(wire.Orientation())
assert not self.IsNull()
|
#!/usr/bin/env python
#This is the modelling code for the ITU Rover Team.
##This code takes pictures when the space bar is pressed and writes the GPS data into their EXIF tags.
###This code is the primary code for modelling and scaling for the science task, which will be done on another operating system.
import numpy as np
import imutils
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
def main():
twist = Twist()
twist.linear.x = 0
twist.angular.z = 0
count = 0
rospy.init_node('cmd_vel_zero')
rate = rospy.Rate(10)
pub = rospy.Publisher('/cmd_vel',Twist,queue_size=10)
while not rospy.is_shutdown():
pub.publish(twist)
count +=1
if count >5:
break
rate.sleep()
if __name__ == '__main__':
main() |
import click

from .. import di
from .. import tag

cli = click.Group('tag')

OK_COLOR = 'green'
WARN_COLOR = 'yellow'
ERR_COLOR = 'red'

TAG_STATUS_COLORS = {
    tag.TagStatus.correct: OK_COLOR,
    tag.TagStatus.incorrect: ERR_COLOR,
    tag.TagStatus.missing: ERR_COLOR,
}


def tags_with_status(tags, desired_status):
    return [tag_name
            for tag_name, tag_status in tags.items()
            if tag_status == desired_status]


def describe_tags_status(tags_status):
    output = []
    for status_name, status in (
            ('missing', tag.TagStatus.missing),
            ('incorrect', tag.TagStatus.incorrect),
            ('correct', tag.TagStatus.correct)):
        matching_tags = tags_with_status(tags_status, status)
        if not matching_tags:
            continue
        color = TAG_STATUS_COLORS[status]
        output_str = '{n} tags {status_name}'.format(
            n=len(matching_tags),
            status_name=status_name)
        output.append(
            click.style(output_str, fg=color))
    return ', '.join(output)


def tags_not_correct_by_inst(instances_tags_status):
    return {
        inst: [
            tag_name for tag_name, tag_status in tags.items()
            if tag_status is not tag.TagStatus.correct]
        for inst, tags in instances_tags_status.items()}


def inst_with_tags_not_correct(instances_tags_status):
    return [inst for inst, tags
            in tags_not_correct_by_inst(instances_tags_status).items()
            if len(tags)]


def tags_not_correct(instances_tags_status):
    return sum(
        tags_not_correct_by_inst(instances_tags_status).values(), [])


@cli.command()
def check():
    tags = tag.get_required_tags()
    instances = tag.get_instances_for_config()
    instances_tags_status = tag.get_instances_tags_status(instances, tags)
    for inst in instances:
        click.echo('Instance: {}'.format(inst.id))
        inst_name = inst.tags.get('Name')
        if inst_name:
            click.echo(' Name: {}'.format(inst_name))
        inst_descript = inst.tags.get('Description')
        if inst_descript:
            click.echo(' Description: {}'.format(inst_descript))
        tags_status = instances_tags_status[inst]
        click.echo(describe_tags_status(tags_status))
        click.echo()
    inst_need_tagging = inst_with_tags_not_correct(instances_tags_status)
    tags_to_apply = tags_not_correct(instances_tags_status)
    if inst_need_tagging or tags_to_apply:
        click.secho(
            '{} instances need tagging \n'
            '{} tags total need to be applied'.format(
                len(inst_need_tagging), len(tags_to_apply)),
            fg=WARN_COLOR)
    else:
        click.secho('No tagging needed', fg=OK_COLOR)


@cli.command()
def apply():
    tags = tag.get_required_tags()
    instances = tag.get_instances_for_config()
    instances_tags_status = tag.get_instances_tags_status(instances, tags)
    inst_need_tagging = inst_with_tags_not_correct(instances_tags_status)
    for inst in inst_need_tagging:
        click.echo('Adding tags for {}'.format(inst.id))
        inst.add_tags(tags)
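
# Hypothetical smoke test, not part of the original module: exercise
# describe_tags_status() with a hand-built per-tag status mapping (the tag
# names below are made up) so the colored summary can be checked without
# touching any real instances.
if __name__ == '__main__':
    fake_status = {
        'Name': tag.TagStatus.correct,
        'Owner': tag.TagStatus.missing,
        'CostCenter': tag.TagStatus.incorrect,
    }
    # expected output along the lines of: "1 tags missing, 1 tags incorrect, 1 tags correct"
    click.echo(describe_tags_status(fake_status))
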
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import calendar
import csv
import logging
import time
import threading
from collections import OrderedDict
from datetime import datetime
from functools import wraps
import xml.etree.ElementTree as ET
from json import dumps
from flask import Response
from presence_analyzer.main import app
log = logging.getLogger(__name__) # pylint: disable=invalid-name
storage_cache = {}
lock = threading.Lock()
def jsonify(function):
    """
    Creates a response with the JSON representation of wrapped function result.
    """
    @wraps(function)
    def inner(*args, **kwargs):
        """
        This docstring will be overridden by @wraps decorator.
        """
        return Response(
            dumps(function(*args, **kwargs)),
            mimetype='application/json'
        )
    return inner
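
# Usage sketch (hypothetical route, not part of the original module): the
# decorator serializes whatever the view returns and sets the JSON mimetype.
@app.route('/api/v1/jsonify_example', methods=['GET'])
@jsonify
def jsonify_example_view():
    """Return a small static payload; @jsonify turns it into a JSON response."""
    return {'user_id': 10, 'name': 'Example User'}
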
def memoize(storage=storage_cache, age_cache=0):
    """
    Caches the wrapped function's result, keyed by function name.
    A cached value expires after `age_cache` seconds (0 means it never expires).
    """
    def _memoize(function):
        def __memoize(*args, **kw):
            key = function.__name__
            # guard the shared cache against concurrent access
            with lock:
                try:
                    expired = (
                        age_cache != 0 and
                        (storage[key]['expire_time'] + age_cache) <
                        time.time())
                except KeyError:
                    expired = True
                if not expired:
                    return storage[key]['values']
                storage[key] = {
                    'expire_time': time.time(),
                    'values': function(*args, **kw)
                }
                return storage[key]['values']
        return __memoize
    return _memoize
@memoize(age_cache=600)
def get_data():
    """
    Extracts presence data from CSV file and groups it by user_id.
    It creates structure like this:
    data = {
        'user_id': {
            datetime.date(2013, 10, 1): {
                'start': datetime.time(9, 0, 0),
                'end': datetime.time(17, 30, 0),
            },
            datetime.date(2013, 10, 2): {
                'start': datetime.time(8, 30, 0),
                'end': datetime.time(16, 45, 0),
            },
        }
    }
    """
    data = {}
    with open(app.config['DATA_CSV'], 'r') as csvfile:
        presence_reader = csv.reader(csvfile, delimiter=',')
        for i, row in enumerate(presence_reader):
            if len(row) != 4:
                # ignore header and footer lines
                continue
            try:
                user_id = int(row[0])
                date = datetime.strptime(row[1], '%Y-%m-%d').date()
                start = datetime.strptime(row[2], '%H:%M:%S').time()
                end = datetime.strptime(row[3], '%H:%M:%S').time()
            except (ValueError, TypeError):
                log.debug('Problem with line %d: ', i, exc_info=True)
                continue
            data.setdefault(user_id, {})[date] = {'start': start, 'end': end}
    return data
def xml_translator():
    """
    Extracts user data from XML file.
    """
    tree = ET.parse(app.config['XML_DATA'])
    root = tree.getroot()
    root_server = root.find('server')
    protocol = root_server.find('protocol').text
    host = root_server.find('host').text
    port = root_server.find('port').text
    url = protocol + '://' + host + ':' + port
    root_user = [root.find('users')]
    data = {}
    for user in root_user[0].findall('user'):
        name = user.find('name').text
        avatar = user.find('avatar').text
        id_user = user.get('id')
        data[int(id_user)] = {'name': name, 'avatar': url + avatar}
    return data
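
# For reference, the XML layout xml_translator() expects, reconstructed from the
# element lookups above (tag values and the root tag name are illustrative; the
# real file path comes from app.config['XML_DATA']):
#
# <intranet>
#     <server>
#         <protocol>https</protocol>
#         <host>intranet.example.com</host>
#         <port>443</port>
#     </server>
#     <users>
#         <user id="10">
#             <name>Example User</name>
#             <avatar>/api/images/users/10</avatar>
#         </user>
#     </users>
# </intranet>
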
def group_by_weekday(items):
    """
    Groups presence entries by weekday.
    """
    result = [[], [], [], [], [], [], []]  # one list for every day in week
    for date in items:
        start = items[date]['start']
        end = items[date]['end']
        result[date.weekday()].append(interval(start, end))
    return result
def seconds_since_midnight(date):
    """
    Calculates amount of seconds since midnight.
    """
    return date.hour * 3600 + date.minute * 60 + date.second


def interval(start, end):
    """
    Calculates interval in seconds between two datetime.time objects.
    """
    return seconds_since_midnight(end) - seconds_since_midnight(start)
def mean(items):
    """
    Calculates arithmetic mean. Returns zero for empty lists.
    """
    return float(sum(items)) / len(items) if len(items) > 0 else 0
def day_start_end(items):
    """
    Groups start and end work times by weekday.
    """
    weekdays = [[] for day in xrange(7)]
    for date in items:
        start = seconds_since_midnight(items[date]['start'])
        end = seconds_since_midnight(items[date]['end'])
        weekdays[date.weekday()].append([start, end])
    days = calendar.day_abbr
    results = []
    for day in days:
        start = []
        end = []
        for item in weekdays[len(results)]:  # len(results) is the current weekday index
            if item != []:
                start.append(item[0])
                end.append(item[1])
        results.append([day, mean(start), mean(end)])
    return results
def podium_data_maker(user):
    """
    Groups presence entries as podium data.
    """
    months = [[] for month in xrange(13)]  # index 0 unused; months are 1-12
    for item in user:
        start = user[item]['start']
        end = user[item]['end']
        months[item.month].append(interval(start, end))
        months[item.month] = [sum(months[item.month])]
    results = podium_result_structure_builder(months)
    return sorted(results, key=lambda time: time[1])
def podium_result_structure_builder(months):
    """
    Building results for podium template.
    """
    results = []
    for month_number, item in enumerate(months):
        try:
            results.append(
                [
                    calendar.month_name[month_number],
                    item[0] / 3600
                ]
            )
        except:
            results.append(['no data', 0])
    return results
def months_sum_dict(year, items, item, user, months):
    """
    Append and sum time for every month.
    """
    if item.year == year:
        start = items[user][item]['start']
        end = items[user][item]['end']
        months[item.month].append(interval(start, end))
        months[item.month] = [sum(months[item.month])]
    return months
def user_validate(months_sum, user):
    """
    Check if the user exists.
    """
    result = []
    try:
        xml_translator()[user]
        if months_sum != []:
            result.append({user: months_sum})
            result = result[0]
    except:
        pass
    return result
def group_by_month(items, year):
    """
    Groups presence entries by month.
    """
    results = []
    for user in items:
        months = [[] for month in xrange(13)]
        for item in items[user]:
            months_sum = months_sum_dict(year, items, item, user, months)
        results.append(user_validate(months_sum, user))
    return results
def sorted_months_dict(dict_months):
    """
    Sort months dict.
    """
    sorted_dict = OrderedDict(
        sorted(
            dict_months,
            key=lambda x: x[1],
            reverse=True
        )
    )
    return sorted_dict
def five_top_user_data(dict_months, sorted_dict):
    """
    Collect data for the top 5 users.
    """
    id_top = list(sorted_dict.keys())[:5]
    results = []
    for item in id_top:
        if dict(dict_months)[item] == 0 or len(id_top) < 5:
            return results
        else:
            try:
                results.append(
                    {
                        'user_id': item,
                        'hours': dict(dict_months)[item][0] / 3600,
                        'name': xml_translator()[item]['name'],
                        'avatar': xml_translator()[item]['avatar']
                    }
                )
            except:
                return results
    return results
def five_top_workers(month, year):
    """
    Top 5 presence users with information about them.
    """
    dict_months = []
    monthly_grouped = group_by_month(get_data(), year)
    for user in monthly_grouped:
        try:
            dict_months.append((user.items()[0][0], user.items()[0][1][month]))
        except:
            pass
    sorted_dict = sorted_months_dict(dict_months)
    return five_top_user_data(dict_months, sorted_dict)
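
# A small, self-contained sketch (synthetic data, not read from the CSV) showing
# how the helpers above fit together: group one user's presence entries by
# weekday and compute the mean presence interval per day.
if __name__ == '__main__':
    from datetime import date, time as day_time
    sample_user = {
        date(2013, 10, 1): {'start': day_time(9, 0, 0), 'end': day_time(17, 30, 0)},
        date(2013, 10, 2): {'start': day_time(8, 30, 0), 'end': day_time(16, 45, 0)},
    }
    by_weekday = group_by_weekday(sample_user)
    # one mean value per weekday; days without entries yield 0
    print([mean(day) for day in by_weekday])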