max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
tests/attacks/class_test.py | henrik997/privacy-evaluator | 0 | 4400 | import pytest
from privacy_evaluator.attacks.sample_attack import Sample_Attack
"""
This test only checks that no error is thrown when calling the function; it can be removed in the future
"""
def test_sample_attack():
test = Sample_Attack(0, 0, 0)
test.perform_attack()
| 2.578125 | 3 |
setup.py | Oli2/presto-python-client | 0 | 4401 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import re
from setuptools import setup
import textwrap
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('prestodb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='presto-python-client',
author='<NAME>',
author_email='<EMAIL>',
version=version,
url='https://github.com/prestodb/presto-python-client',
packages=['prestodb'],
package_data={'': ['LICENSE', 'README.md']},
description='Client for the Presto distributed SQL Engine',
long_description=textwrap.dedent("""
Client for Presto (https://prestodb.io), a distributed SQL engine for
interactive and batch big data processing. Provides a low-level client and
a DBAPI 2.0 implementation.
"""),
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Database :: Front-Ends',
],
install_requires=[
'click',
'future',
'ipaddress',
'requests',
'requests_kerberos',
'six',
'typing',
],
extras_require={'tests':[
'httpretty',
'pytest',
'pytest-runner',
]}
)
| 1.484375 | 1 |
Graphs/Pie Chart.py | TausifAnsari/PyHub | 1 | 4402 | <gh_stars>1-10
import matplotlib.pyplot as graph
subject = ["Probability", "Calculas", "Discrete Mathematics", "Adv Engineering Mathematics",
"Linear Algebra", "Cryptography"]
weightage = [250,900,850,1200,290,345]
seperator = [0.05,0,0,0,0.05,0.05]
graph.title("Mathematics Topic Weightage")
graph.pie(weightage,labels=subject,autopct="%0.1f%%", explode=seperator)
graph.show() | 2.703125 | 3 |
exercises/perform_model_selection.py | noavilk/IML.HUJI | 0 | 4403 | from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotnine as gg
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
def f(x):
return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
X = np.linspace(-1.2, 2, n_samples)
y = f(X) + np.random.normal(0, noise, n_samples)
train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3))
df_train = pd.DataFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"})
df_test = pd.DataFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"})
x_stat = np.linspace(-1.4, 2, 100)
df_stat = pd.DataFrame({"x": x_stat, "y": f(x_stat), "type": "Model"})
df = pd.concat([df_test, df_train])
title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})"
p = gg.ggplot() + \
gg.geom_point(df, gg.aes("x", "y", color="type")) + \
gg.geom_line(df_stat, gg.aes("x", "y")) + \
gg.theme_bw() + \
gg.ggtitle(title)
# print(p)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
train_err = []
validation_err = []
for k in range(11):
pf = PolynomialFitting(k)
train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(), mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"k": range(11), "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"k": range(11), "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f" Cross Validation for Polynomial Fitting Over Different Degrees k"
p = gg.ggplot(df, gg.aes("k", "avg error", color="type")) + \
gg.geom_point() + \
gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \
gg.labs(y="Average training and validation errors",
title=f"{title} \nWith Noise: {noise}, Num of samples: {n_samples}")
gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False)
# Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
best_k = np.argmin(np.array(validation_err))
pf = PolynomialFitting(int(best_k))
pf.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = pf.predict(test_X.to_numpy())
print("best k =", best_k)
print("Test = ", round(mean_square_error(test_y.to_numpy(), y_pred), 2))
print("Validation = ", round(validation_err[best_k], 2))
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
"""
Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
values for Ridge and Lasso regressions
Parameters
----------
n_samples: int, default=50
Number of samples to generate
n_evaluations: int, default = 500
Number of regularization parameter values to evaluate for each of the algorithms
"""
# Question 6 - Load diabetes dataset and split into training and testing portions
X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:]
# Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, 500)),
("Lasso", Lasso, np.linspace(0.001, 0.5, 500))]:
train_err = []
validation_err = []
for lam in ran:
rg = learner(lam)
train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(),
mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"lambda": ran, "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f"{name} Regularization Cross Validate Over Different Lambda"
p = gg.ggplot(df, gg.aes("lambda", "avg error", color="type")) + \
gg.geom_line() + \
gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
best_lam = np.argmin(np.array(validation_err))
rg = learner(ran[best_lam])
rg.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = rg.predict(test_X.to_numpy())
print(f"best lambda {name} = {round(ran[best_lam], 3)}")
print(f"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}")
lr = LinearRegression()
lr.fit(train_X.to_numpy(), train_y.to_numpy())
print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy()))
if __name__ == '__main__':
np.random.seed(0)
select_polynomial_degree()
select_polynomial_degree(noise=0)
select_polynomial_degree(n_samples=1500, noise=10)
select_regularization_parameter()
| 3.046875 | 3 |
libraries/tools/media_utils.py | unfoldingWord-dev/d43-catalog | 1 | 4404 | import re
import copy
def parse_media(media, content_version, project_chapters):
"""
Converts a media object into formats usable in the catalog
:param media: the media object
:type media: dict
:param content_version: the current version of the source content
:type content_version: string
:param project_chapters: a dictionary of project chapters
:type project_chapters: dict
:return: resource_formats, project_formats a list of resource formats and dictionary of project formats
"""
resource_formats = []
project_formats = {}
if 'resource' in media:
resource_formats = _parse_resource(media['resource'], content_version)
if 'projects' in media:
for project in media['projects']:
project_id = project['identifier']
chapters = []
if project_id == 'obs':
# TRICKY: obs projects always have 50 chapters
# This allows empty projects to still publish media.
for x in range(1, 51): # chapters 1..50
chapters.append(str(x).zfill(2))
if project_id in project_chapters:
chapters = project_chapters[project_id]
project_formats[project_id] = _parse_project(project, content_version, chapters)
return resource_formats, project_formats
def _parse_resource(resource, content_version):
"""
Converts a resource media object into formats usable in the catalog
:param resource: the media object
:type resource: dict
:param content_version: the current version of the source content
:type content_version: string
:return: a list of formats
"""
source_version = _expand_keys(resource['version'], {'latest': content_version})
formats = []
if 'media' in resource:
for media in resource['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
else:
# build a single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
return formats
def _make_format(source_version, media_version, quality, media, expansion_vars):
format = {
'format': '',
'modified': '',
'size': 0,
'source_version': '{}'.format(source_version),
'version': '{}'.format(media_version),
'contributor': media['contributor'],
'url': _expand_keys(media['url'], expansion_vars),
'signature': '',
'build_rules': [
'signing.sign_given_url'
]
}
if quality:
format['quality'] = quality
return format
def _parse_project(project, content_version, chapters_ids):
"""
Converts a project media object into formats usable in the catalog
:param project: the media object
:type project: dict
:param content_version: the current version of the source content
:type content_version: string
:param chapters_ids: a list of chapter identifiers in the project
:type chapters_ids: list
:return: a list of formats
"""
source_version = _expand_keys(project['version'], {'latest': content_version})
formats = []
if 'media' in project:
for media in project['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
else:
# build single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
return formats
def _prepare_chapter_formats(media, chapters, expansion_vars):
"""
This is a wrapper around the method `_parse_project_chapter`.
Since we routinely conditionally prepare chapters in multiple places
this handles it in one place
:param media: the media object to inspect
:param chapters: a list of chapter ids
:param expansion_vars: a dictionary of variables that may be expanded in the chapter url
:return:
"""
if 'chapter_url' in media:
chapter_url = _expand_keys(media['chapter_url'], expansion_vars)
chapters = _parse_project_chapter(chapter_url, chapters)
if chapters:
return chapters
return None
def _parse_project_chapter(chapter_url, chapters):
"""
Generates chapter formats for use in the catalog
:param chapter_url: the url template that will be used in the formats
:param chapters: a list of chapter ids
:type chapters: list
:return:
"""
# TODO: this requires that we give a well formatted list of chapter ids and check if the Rc is a book
# only book RCs can have chapter formats
formats = []
for chapter_id in chapters:
format = {
'size': 0,
'length': 0,
'modified': '',
'identifier': chapter_id,
'url': _expand_keys(chapter_url, {'chapter': chapter_id}),
'signature': '',
'build_rules': [
'signing.sign_given_url'
]
}
formats.append(format)
return formats
def _make_expansion_variables(media_block, content_version):
"""
Creates a dictionary of expansion variables for media items.
:param self:
:param media_block:
:param content_version:
:return:
"""
vars = copy.copy(media_block)
# strip black listed keys
black_list = ['url', 'chapter_url']
for key in black_list:
if key in vars:
del vars[key]
# TRICKY: using `latest` as an expansion variable in urls is not explicitly stated in the spec,
# but it's a common misunderstanding so we allow it.
vars['latest'] = '{}'.format(content_version)
return vars
def _expand_keys(target, replacements):
"""
Replaces all the dict keys found in the string with the dict values.
Keys in the string must be delimited by brackets {}
:param target:
:param replacements:
:return:
"""
if isinstance(target, basestring) or isinstance(target, str):
result = target
if not isinstance(replacements, dict):
raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements)))
for key in replacements:
if not isinstance(replacements[key], list):
result = re.sub(r'{\s*' + key + '\s*}', '{}'.format(replacements[key]), result)
return result
elif isinstance(target, int):
return target
else:
raise Exception('Invalid replacement target "{}". Expected string but received {}'.format(target, type(target)))
| 3.015625 | 3 |
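A minimal usage sketch for parse_media above, run under Python 2 (which this module targets, given its use of basestring). The media dict shape, URL, and chapter map are illustrative assumptions inferred from the parser, not values from the source.
# Hypothetical input, shaped the way parse_media/_parse_resource above expect it.
media = {
    'resource': {
        'version': '{latest}',
        'media': [{
            'identifier': 'mp3',
            'version': '{latest}',
            'contributor': [],
            'quality': ['64kbps'],
            'url': 'https://example.com/{quality}/obs.zip'
        }]
    },
    'projects': []
}
resource_formats, project_formats = parse_media(media, '4', {'obs': ['01', '02']})
print(resource_formats[0]['url'])  # -> https://example.com/64kbps/obs.zip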
django_customflow/mixins.py | Brad19940809/django-customflow | 1 | 4405 | # -*- coding:utf-8 -*-
# create_time: 2019/8/5 16:02
# __author__ = 'brad'
from . import utils
from .tasks.base import WaitingTask, BaseTask
class WorkflowMixin(object):
"""Mixin class to make objects workflow aware.
"""
def get_workflow(self):
"""Returns the current workflow of the object.
"""
return utils.get_workflow(self)
def remove_workflow(self):
"""Removes the workflow from the object. After this function has been
called the object has no *own* workflow anymore (it might have one via
its content type).
"""
return utils.remove_workflow_from_object(self)
def set_workflow(self, workflow):
"""Sets the passed workflow to the object. This will set the local
workflow for the object.
If the object has already the given workflow nothing happens.
Otherwise the object gets the passed workflow and the state is set to
the workflow's initial state.
**Parameters:**
workflow
The workflow which should be set to the object. Can be a Workflow
instance or a string with the workflow name.
obj
The object which gets the passed workflow.
"""
return utils.set_workflow_for_object(self, workflow)
def get_state(self):
"""Returns the current workflow state of the object.
"""
return utils.get_state(self)
def set_state(self, state):
"""Sets the workflow state of the object.
"""
return utils.set_state(self, state)
def set_initial_state(self):
"""Sets the initial state of the current workflow to the object.
"""
return self.set_state(self.get_workflow().initial_state)
def do_transition(self, transition, user):
"""Processes the passed transition (if allowed).
"""
return utils.do_transition(self, transition, user)
def do_next_state(self):
if self.state_is_waiting():
print("state is waiting! please use method .state_end_waiting() when the WaitingTask has finished.")
state = self.get_state()
transitions = state.transitions.all()
        # info: reaching this point means the state node is the final one in the flow
if not transitions:
print(state.name, "is the end state")
return False
for transition in transitions:
if transition.condition:
cond = utils.import_from_string(transition.condition)
                # todo: currently this polls the transitions and takes the first one whose condition passes, then sets the state
if not cond().run(self, transition):
continue
if transition.task:
                # todo: whether tasks run sequentially or asynchronously, or have forward dependencies, still needs to be worked out
task = utils.import_from_string(transition.task)()
if not isinstance(task, (BaseTask, WaitingTask)):
raise TypeError("This task is not Basetask or WaitingTask instance")
task.run(self, transition)
next_state_instance = transition.destination
self.set_state(next_state_instance)
# info:This is the waiting task setting.
if transition.task and isinstance(task, WaitingTask):
self.state_set_waiting()
            # info: write the log entry
self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name)
            # todo: this is the traversal step; if the next state is not manual, it is advanced automatically here
if not next_state_instance.manual:
return self.do_next_state()
return True
def set_log(self, state, source_state=None, transition=None):
return utils.set_log(self, state, source_state, transition)
def get_log(self):
return utils.get_log(self)
def workflow_is_finished(self):
state = self.get_state()
if not state.transitions.all():
return True
else:
return False
def state_is_waiting(self):
return utils.get_state_relation(self).waiting
def state_end_waiting(self):
state_relation = utils.get_state_relation(self)
if not state_relation.waiting:
print("there is no need to set")
return None
state_relation.waiting = False
state_relation.save()
def state_set_waiting(self):
state_relation = utils.get_state_relation(self)
if state_relation.waiting:
print("there is no need to set")
return None
state_relation.waiting = True
state_relation.save()
| 2.8125 | 3 |
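A hedged sketch of attaching the mixin above to a Django model; the Order model and the "approval" workflow name are invented for illustration and assume the workflows/states already exist in the database.
# Hypothetical Django model made workflow-aware via WorkflowMixin.
from django.db import models
from django_customflow.mixins import WorkflowMixin

class Order(WorkflowMixin, models.Model):
    number = models.CharField(max_length=32)

# order = Order.objects.create(number="A-001")
# order.set_workflow("approval")    # workflow name or Workflow instance
# order.set_initial_state()
# order.do_next_state()             # advances until a manual state or the end
# print(order.workflow_is_finished())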
video_encoding/fields.py | fossabot/django-video-encoding | 164 | 4406 | from django.db.models.fields.files import (FieldFile, ImageField,
ImageFileDescriptor)
from django.utils.translation import ugettext as _
from .backends import get_backend_class
from .files import VideoFile
class VideoFileDescriptor(ImageFileDescriptor):
pass
class VideoFieldFile(VideoFile, FieldFile):
def delete(self, save=True):
# Clear the video info cache
if hasattr(self, '_info_cache'):
del self._info_cache
super(VideoFieldFile, self).delete(save=save)
class VideoField(ImageField):
attr_class = VideoFieldFile
descriptor_class = VideoFileDescriptor
description = _("Video")
def __init__(self, verbose_name=None, name=None, duration_field=None,
**kwargs):
self.duration_field = duration_field
super(VideoField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_backend())
return errors
def _check_backend(self):
backend = get_backend_class()
return backend.check()
def to_python(self, data):
# use FileField method
return super(ImageField, self).to_python(data)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
_file = getattr(instance, self.attname)
# we need a real file
if not _file._committed:
return
# write `width` and `height`
super(VideoField, self).update_dimension_fields(instance, force,
*args, **kwargs)
if not self.duration_field:
return
# Nothing to update if we have no file and not being forced to update.
if not _file and not force:
return
if getattr(instance, self.duration_field) and not force:
return
# get duration if file is defined
duration = _file.duration if _file else None
# update duration
setattr(instance, self.duration_field, duration)
def formfield(self, **kwargs):
# use normal FileFieldWidget for now
return super(ImageField, self).formfield(**kwargs)
| 2.109375 | 2 |
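A sketch of declaring the VideoField above on a model; the Video model and field names are assumptions, and the width/height/duration fields mirror what update_dimension_fields writes.
# Hypothetical model using VideoField; duration_field names a sibling field.
from django.db import models
from video_encoding.fields import VideoField

class Video(models.Model):
    file = VideoField(upload_to='videos', width_field='width',
                      height_field='height', duration_field='duration')
    width = models.PositiveIntegerField(null=True, editable=False)
    height = models.PositiveIntegerField(null=True, editable=False)
    duration = models.FloatField(null=True, editable=False)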
BST.py | boristown/leetcode | 1 | 4407 | <filename>BST.py
class BST:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
@staticmethod
def array2BST(array):
'''
array:sorted array
'''
n = len(array)
if n == 0: return None
m = n//2
left,root,right = array[:m],array[m],array[m+1:]
return BST(root,BST.array2BST(left),BST.array2BST(right))
@staticmethod
def BST2array(node):
'''
node:BST node
'''
if not node: return []
return BST.BST2array(node.left)+[node.val]+BST.BST2array(node.right) | 3.65625 | 4 |
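A small usage sketch for the BST helper above: a sorted array is turned into a balanced tree and recovered by an in-order traversal.
# Round-trip a sorted array through the helpers above.
values = [1, 2, 3, 4, 5, 6, 7]
root = BST.array2BST(values)           # middle element (4) becomes the root
assert root.val == 4
assert BST.BST2array(root) == values   # in-order traversal restores the array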
test/spec/test_spec.py | raghu1121/SLM-Lab | 1 | 4408 | <gh_stars>1-10
from flaky import flaky
from slm_lab.experiment.control import Trial
from slm_lab.experiment.monitor import InfoSpace
from slm_lab.lib import util
from slm_lab.spec import spec_util
import os
import pandas as pd
import pytest
import sys
# helper method to run all tests in test_spec
def run_trial_test(spec_file, spec_name=False):
spec = spec_util.get(spec_file, spec_name)
spec = spec_util.override_test_spec(spec)
info_space = InfoSpace()
info_space.tick('trial')
trial = Trial(spec, info_space)
trial_data = trial.run()
assert isinstance(trial_data, pd.DataFrame)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/reinforce.json', 'reinforce_mlp_cartpole'),
('experimental/reinforce.json', 'reinforce_rnn_cartpole'),
# ('experimental/reinforce.json', 'reinforce_conv_breakout'),
])
def test_reinforce(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/reinforce.json', 'reinforce_mlp_pendulum'),
('experimental/reinforce.json', 'reinforce_rnn_pendulum'),
])
def test_reinforce_cont(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/a2c.json', 'a2c_mlp_shared_cartpole'),
('experimental/a2c.json', 'a2c_mlp_separate_cartpole'),
('experimental/a2c.json', 'a2c_rnn_shared_cartpole'),
('experimental/a2c.json', 'a2c_rnn_separate_cartpole'),
# ('experimental/a2c.json', 'a2c_conv_shared_breakout'),
# ('experimental/a2c.json', 'a2c_conv_separate_breakout'),
('experimental/a2c.json', 'a2c_mlp_concat_cartpole'),
])
def test_a2c(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/a2c.json', 'a2c_mlp_shared_pendulum'),
('experimental/a2c.json', 'a2c_mlp_separate_pendulum'),
('experimental/a2c.json', 'a2c_rnn_shared_pendulum'),
('experimental/a2c.json', 'a2c_rnn_separate_pendulum'),
])
def test_a2c_cont(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/ppo.json', 'ppo_mlp_shared_cartpole'),
('experimental/ppo.json', 'ppo_mlp_separate_cartpole'),
('experimental/ppo.json', 'ppo_rnn_shared_cartpole'),
('experimental/ppo.json', 'ppo_rnn_separate_cartpole'),
# ('experimental/ppo.json', 'ppo_conv_shared_breakout'),
# ('experimental/ppo.json', 'ppo_conv_separate_breakout'),
])
def test_ppo(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/ppo.json', 'ppo_mlp_shared_pendulum'),
('experimental/ppo.json', 'ppo_mlp_separate_pendulum'),
('experimental/ppo.json', 'ppo_rnn_shared_pendulum'),
('experimental/ppo.json', 'ppo_rnn_separate_pendulum'),
])
def test_ppo_cont(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'),
('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'),
('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'),
('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'),
])
def test_ppo_sil(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'),
('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'),
('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'),
('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'),
])
def test_ppo_sil_cont(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/sil.json', 'sil_mlp_shared_cartpole'),
('experimental/sil.json', 'sil_mlp_separate_cartpole'),
('experimental/sil.json', 'sil_rnn_shared_cartpole'),
('experimental/sil.json', 'sil_rnn_separate_cartpole'),
# ('experimental/sil.json', 'sil_conv_shared_breakout'),
# ('experimental/sil.json', 'sil_conv_separate_breakout'),
])
def test_sil(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/sil.json', 'sil_mlp_shared_pendulum'),
('experimental/sil.json', 'sil_mlp_separate_pendulum'),
('experimental/sil.json', 'sil_rnn_shared_pendulum'),
('experimental/sil.json', 'sil_rnn_separate_pendulum'),
])
def test_sil_cont(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'),
('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'),
('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'),
('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'),
# ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'),
# ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'),
])
def test_sarsa(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/dqn.json', 'vanilla_dqn_cartpole'),
('experimental/dqn.json', 'dqn_boltzmann_cartpole'),
('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'),
('experimental/dqn.json', 'drqn_boltzmann_cartpole'),
('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'),
# ('experimental/dqn.json', 'dqn_boltzmann_breakout'),
# ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'),
('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'),
])
def test_dqn(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'),
('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'),
('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'),
('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'),
# ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'),
# ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'),
])
def test_ddqn(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'),
('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'),
# ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'),
# ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'),
])
def test_dueling_dqn(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'),
('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'),
# ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'),
])
def test_hydra_dqn(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/dqn.json', 'dqn_pong'),
# ('experimental/a2c.json', 'a2c_pong'),
])
def test_atari(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('experimental/reinforce.json', 'reinforce_conv_vizdoom'),
])
def test_reinforce_vizdoom(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('base.json', 'base_case_unity'),
('base.json', 'base_case_openai'),
('random.json', 'random_cartpole'),
('random.json', 'random_pendulum'),
# ('base.json', 'multi_agent'),
# ('base.json', 'multi_agent_multi_env'),
])
def test_base(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
@pytest.mark.parametrize('spec_file,spec_name', [
('base.json', 'multi_body'),
('base.json', 'multi_env'),
])
def test_base_multi(spec_file, spec_name):
run_trial_test(spec_file, spec_name)
| 2.015625 | 2 |
test/test_modify_group.py | Sfairat00/training_python | 0 | 4409 | from model.group import Group
def test_modify_group_name(app):
if app.group.count() == 0:
app.group.create(Group(name="test"))
old_groups = app.group.get_group_list()
app.group.modify_first_group(Group(name="New group"))
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)
def test_modify_group_header(app):
if app.group.count() == 0:
app.group.create(Group(header="test"))
old_groups = app.group.get_group_list()
app.group.modify_first_group(Group(header="New header"))
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)
| 2.453125 | 2 |
readme_metrics/MetricsMiddleware.py | readmeio/metrics-sdks-python | 2 | 4410 | <gh_stars>1-10
import io
import time
import datetime
from readme_metrics.Metrics import Metrics
from readme_metrics.MetricsApiConfig import MetricsApiConfig
from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper
from werkzeug import Request
class MetricsMiddleware:
"""Core middleware class for ReadMe Metrics
Attributes:
config (MetricsApiConfig): Contains the configuration settings for the running
middleware instance
"""
def __init__(self, wsgi_app_reference, config: MetricsApiConfig):
"""
Constructs and initializes MetricsMiddleware WSGI middleware to be passed into
the currently running WSGI web server.
Args:
wsgi_app_reference ([type]): Reference to the current WSGI application,
which will be wrapped
config (MetricsApiConfig): Instance of MetricsApiConfig object
"""
self.config = config
self.app = wsgi_app_reference
self.metrics_core = Metrics(config)
def __call__(self, environ, start_response):
"""Method that is called by the running WSGI server.
You should NOT be calling this method yourself under normal circumstances.
"""
response_headers = {}
response_status = 0
iterable = None
req = Request(environ)
def _start_response(_status, _response_headers, *args):
write = start_response(_status, _response_headers, *args)
# Populate response info (headers & status)
nonlocal response_headers, response_status
response_headers = _response_headers
response_status = _status
return write
try:
req.rm_start_dt = str(datetime.datetime.utcnow())
req.rm_start_ts = int(time.time() * 1000)
if req.method == "POST":
# The next 4 lines are a workaround for a serious shortcoming in the
# WSGI spec.
#
# The data can only be read once, after which the socket is exhausted
# and cannot be read again. As such, we read the data and then
# repopulate the variable so that it can be used by other code down the
# pipeline.
#
# For more info: https://stackoverflow.com/a/13106009/643951
# the environment variable CONTENT_LENGTH may be empty or missing
try:
content_length = int(environ.get("CONTENT_LENGTH", 0))
except (ValueError):
content_length = 0
content_body = environ["wsgi.input"].read(content_length)
# guarding check to close stream
if hasattr(environ["CONTENT_LENGTH"], "close"):
environ["wsgi.input"].close()
environ["wsgi.input"] = io.BytesIO(content_body)
req.rm_content_length = content_length
req.rm_body = content_body
iterable = self.app(environ, _start_response)
for data in iterable:
res_ctype = ""
res_clength = 0
htype = next(
(h for h in response_headers if h[0] == "Content-Type"), None
)
hlength = next(
(h for h in response_headers if h[0] == "Content-Length"), None
)
if htype and hlength:
res_ctype = htype[1]
res_clength = int(hlength[1])
# Populate response body
res = ResponseInfoWrapper(
response_headers,
response_status,
res_ctype,
res_clength,
data.decode("utf-8"),
)
# Send off data to be queued (and processed) by ReadMe if allowed
self.metrics_core.process(req, res)
yield data
finally:
# Undocumented in WSGI spec but the iterable has to be closed
if hasattr(iterable, "close"):
iterable.close()
| 2.25 | 2 |
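A sketch of wiring the middleware above into a Flask app's WSGI stack; the API key, grouping function, and MetricsApiConfig signature are assumptions to be checked against the readme_metrics package.
# Hypothetical wiring into a Flask app; config values are placeholders.
from flask import Flask
from readme_metrics.MetricsApiConfig import MetricsApiConfig
from readme_metrics.MetricsMiddleware import MetricsMiddleware

app = Flask(__name__)

def group_fn(req):
    # Identifies the calling user/project for the ReadMe dashboard.
    return {"id": "anonymous", "label": "Anonymous", "email": "anon@example.com"}

config = MetricsApiConfig("README_API_KEY", group_fn)  # signature assumed
app.wsgi_app = MetricsMiddleware(app.wsgi_app, config)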
kbrl.py | deekshaarya4/gymexperiments | 0 | 4411 | <filename>kbrl.py
import numpy as np
import gym
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='KBRL with KNN')
parser.add_argument('--episodes', nargs='?', type=int, default=500)
parser.add_argument('--max_timesteps', nargs='?', type=int, default=200)
parser.add_argument('environment')
args = parser.parse_args()
env = gym.make(args.environment).env
action_space = env.action_space
# hyperparameters:
epsilon = 1.0
exploration_decay = 0.98
k = 500 # number of nearest neighbors
minimum_num_iters = 500 # number of iterations used for training
num_iter = 0
max_iters = 0
gamma = 0.95
max_state_size = 15000 # because we don't know the state space size in continuous environments
# learning-related variables
states = None
actions = {}
rewards = {}
values = {}
# episode-related variables
episode_beginning = 0
def make_move(observation, reward, done):
global states, actions, values, rewards, num_iter, episode_beginning, max_iters, epsilon
if states is None:
# first state observed
states = np.zeros((max_state_size, observation.size))
if num_iter > minimum_num_iters and np.random.rand() > epsilon and values:
# if amount of data is sufficient and values is populated (atleast one episode has been run)
# testing phase: exploitation
# Uses k=500 nearest neighbors to pick the action which has the highest reward
nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters])
distances, indices = nbrs.kneighbors(observation)
# find the best action
action_list = {}
freq_list = {}
for i in indices[0]:
v = values[i]
a = actions[i]
vnew = action_list.get(a, 0) + v
action_list[a] = vnew
freq_list[a] = freq_list.get(a, 0) + 1
# normalize by number of times action occured and take action with highest value
for act in action_list:
action_list[act] = action_list[act] / freq_list[act]
sorted_list = [(y,x) for x,y in action_list.items()]
sorted_list.sort(reverse=True)
take_action = sorted_list[0][1]
else:
# training phase: exploration randomly picks an action
take_action = action_space.sample()
# populate the state present, action taken and reward obtained
if num_iter < max_state_size:
states[num_iter] = observation # save the state
actions[num_iter] = take_action # and the action we took
rewards[num_iter-1] = reward # and the reward we obtained last time step
values[num_iter-1] = 0
num_iter += 1
if done:
# end of episode: calculate the value function for this episode
val = 0
for t in reversed(range(episode_beginning, num_iter)):
val = gamma * val + rewards.get(t,0)
values[t] = val
episode_beginning = num_iter
max_iters = min(max(max_iters, num_iter), max_state_size)
# decay exploration probability
epsilon *= exploration_decay
# do not decay below 0
epsilon = max(epsilon, 0)
return take_action
# Ignore sklearn warnings
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
reward = 0
episode_reward = 0
done = False
cumulative_reward_list = []
for i in range(args.episodes):
observation = env.reset()
sum_reward = 0
for j in range(args.max_timesteps):
env.render()
action = make_move(observation, reward, done)
observation, reward, done, _ = env.step(action)
sum_reward += reward
if done:
break
episode_reward = episode_reward * 0.95 + sum_reward * 0.05
print('Reward for episode '+ str(i)+' : '+str(episode_reward))
cumulative_reward_list.append(episode_reward)
# env.render()
plt.plot(range(0,500), cumulative_reward_list, linewidth=2)
plt.xlabel("Episodes")
plt.ylabel("Cumulative Reward")
plt.title("Performance")
plt.show()
plt.close()
| 2.9375 | 3 |
shardDesigner/shardTemplateDir/shardStemDir/log/elast.py | vinci-project/rootShard | 0 | 4412 | import elasticsearch
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import time, json, datetime, os
class elalog:
def __init__(self, date):
es_host = os.getenv("ES_PORT_9200_TCP_ADDR") or '<%ELASTICIP%>'
es_port = os.getenv("ES_PORT_9200_TCP_PORT") or '9200'
self.lastDate = date
self.es = Elasticsearch([{'host': es_host, 'port': es_port}])
# BLOCKS INDEX
self.blocks_index_name = "blocks-" + date
self.block_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"blocks-" + date: {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"hash": {
"type": "text"
},
"signatures": {
"type": "text"
},
"tcount": {
"type": "long"
},
"validator": {
"type": "text",
"fielddata": True
},
"bheight": {
"type": "long"
}
}
}
}
}
if self.es.indices.exists(self.blocks_index_name):
try:
self.es.indices.delete(index=self.blocks_index_name)
self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)
# TRANSACTIONS INDEX
self.transactions_index_name = "transactions-" + date
self.transactions_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"transactions-" + date: {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"sender": {
"type": "text",
"fielddata": True
},
"receiver": {
"type": "text",
"fielddata": True
},
"token_count": {
"type": "float"
},
"token_type": {
"type": "text",
"fielddata": True
},
"hash": {
"type": "text"
},
"block": {
"type": "long"
}
}
}
}
}
if self.es.indices.exists(self.transactions_index_name):
try:
self.es.indices.delete(index=self.transactions_index_name)
self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)
# BALANCE HISTORY
self.balance_index_name = "balance"
self.balance_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"balance": {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"user": {
"type": "text",
"fielddata": True
},
"balance": {
"type": "float"
}
}
}
}
}
if self.es.indices.exists(self.balance_index_name):
try:
self.es.indices.delete(index=self.balance_index_name)
self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)
# VALIDATOR STATISTIC
self.clients_index_name = "clients"
self.clients_mapping = {
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
},
"mappings": {
"clients": {
"properties": {
"@dtime": {
"type": "date",
"format": "epoch_second"
},
"ip": {
"type": "ip"
},
"geoip": {
"properties": {
"city_name": {
"type": "text"
},
"continent_name": {
"type": "text"
},
"country_iso_code": {
"type": "text"
},
"location": {
"type": "geo_point"
},
"region_name": {
"type": "text"
}
}
},
"public_key": {
"type": "text",
"fielddata": True
},
"client_type": {
"type": "text",
"fielddata": True
}
}
}
}
}
if self.es.indices.exists(self.clients_index_name):
try:
self.es.indices.delete(index=self.clients_index_name)
self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on create Indicies:", es1)
else:
self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)
def elasticClients(self, jsons:list):
try:
helpers.bulk(self.es, jsons)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on save Validators:", es1)
print("Save Validators in elastic!")
def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int):
index = 'blocks-' + self.lastDate
estype = 'blocks-' + self.lastDate
eljson = json.dumps({"@dtime": int(timestamp), "validator": validator, "tcount": tcount, "signatures": list(signatures), "hash": hash, "bheight": bheight}, separators=(',', ':'))
try:
self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on send Block:", es1)
def elasticTransaction(self, jsons:list):
try:
helpers.bulk(self.es, jsons)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on save bulk Transactions:", es1)
def elasticBalanceHistory(self, balance:dict):
users = balance.keys()
jsonMas = []
print("USER LEN:", len(users))
for user in users:
eljson = {"_index": "balance", "_type": "balance", "_id": user,
"_source": {"@dtime": int(time.time()), "user": user,
"balance": balance.get(user)}}
jsonMas.append(eljson)
try:
helpers.bulk(self.es, jsonMas)
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on save balance:", es1)
def getLastEBlock(self):
query = {"aggs" : {
"max_blnum":{"max":{"field":"bheight"}}
},"size": 0
}
try:
answer = self.es.search(index="blocks-" + self.lastDate, doc_type="blocks-" + self.lastDate, body=query)
if not answer["aggregations"]["max_blnum"]["value"] == None:
return int(answer["aggregations"]["max_blnum"]["value"])
else:
return 0
except elasticsearch.ElasticsearchException as es1:
print("Elastic exception on search last block index:", es1)
| 2.546875 | 3 |
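A brief usage sketch for the elalog class above; it assumes the ES_PORT_9200_TCP_ADDR/PORT environment variables (or the <%ELASTICIP%> placeholder) point at a reachable Elasticsearch cluster.
# Hypothetical usage against a live Elasticsearch instance.
import time

log = elalog("2020-01-01")
log.elasticBlock(timestamp=time.time(), validator="validator_pubkey", tcount=2,
                 signatures=["sig1", "sig2"], hash="abc123", bheight=1)
log.elasticBalanceHistory({"user_pubkey": 42.0})
print(log.getLastEBlock())  # -> 1 once the block document is indexed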
corehq/apps/sms/tests.py | dslowikowski/commcare-hq | 1 | 4413 | <reponame>dslowikowski/commcare-hq<filename>corehq/apps/sms/tests.py<gh_stars>1-10
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from util import clean_phone_number, clean_outgoing_sms_text
from django.test import TestCase
class UtilTestCase(TestCase):
def setUp(self):
pass
def testCleanPhoneNumber(self):
phone_number = " 324 23-23421241"
cleaned = clean_phone_number(phone_number)
self.assertEquals(cleaned, "+3242323421241")
def testCleanOutgoingSMSText(self):
text = u"+this is a test شسیبشسی"
cleaned = clean_outgoing_sms_text(text)
# make sure '+' and unicode get encoded for GET properly
self.assertEquals(cleaned, "%2Bthis%20is%20a%20test%20%D8%B4%D8%B3%DB%8C%D8%A8%D8%B4%D8%B3%DB%8C")
| 2.125 | 2 |
deepchem/models/atomic_conv.py | cjgalvin/deepchem | 3 | 4414 | __author__ = "<NAME>"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import sys
from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Layer
import numpy as np
import tensorflow as tf
import itertools
def initializeWeightsBiases(prev_layer_size,
size,
weights=None,
biases=None,
name=None):
"""Initializes weights and biases to be used in a fully-connected layer.
Parameters
----------
prev_layer_size: int
Number of features in previous layer.
size: int
Number of nodes in this layer.
weights: tf.Tensor, optional (Default None)
Weight tensor.
biases: tf.Tensor, optional (Default None)
Bias tensor.
name: str
Name for this op, optional (Defaults to 'fully_connected' if None)
Returns
-------
weights: tf.Variable
Initialized weights.
biases: tf.Variable
Initialized biases.
"""
if weights is None:
weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01)
if biases is None:
biases = tf.zeros([size])
w = tf.Variable(weights, name='w')
b = tf.Variable(biases, name='b')
return w, b
class AtomicConvScore(Layer):
"""The scoring function used by the atomic convolution models."""
def __init__(self, atom_types, layer_sizes, **kwargs):
super(AtomicConvScore, self).__init__(**kwargs)
self.atom_types = atom_types
self.layer_sizes = layer_sizes
def build(self, input_shape):
self.type_weights = []
self.type_biases = []
self.output_weights = []
self.output_biases = []
n_features = int(input_shape[0][-1])
layer_sizes = self.layer_sizes
num_layers = len(layer_sizes)
weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
bias_init_consts = [0.0] * num_layers
for ind, atomtype in enumerate(self.atom_types):
prev_layer_size = n_features
self.type_weights.append([])
self.type_biases.append([])
self.output_weights.append([])
self.output_biases.append([])
for i in range(num_layers):
weight, bias = initializeWeightsBiases(
prev_layer_size=prev_layer_size,
size=layer_sizes[i],
weights=tf.random.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
biases=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]]))
self.type_weights[ind].append(weight)
self.type_biases[ind].append(bias)
prev_layer_size = layer_sizes[i]
weight, bias = initializeWeightsBiases(prev_layer_size, 1)
self.output_weights[ind].append(weight)
self.output_biases[ind].append(bias)
def call(self, inputs):
frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs
atom_types = self.atom_types
num_layers = len(self.layer_sizes)
def atomnet(current_input, atomtype):
prev_layer = current_input
for i in range(num_layers):
layer = tf.nn.bias_add(
tf.matmul(prev_layer, self.type_weights[atomtype][i]),
self.type_biases[atomtype][i])
layer = tf.nn.relu(layer)
prev_layer = layer
output_layer = tf.squeeze(
tf.nn.bias_add(
tf.matmul(prev_layer, self.output_weights[atomtype][0]),
self.output_biases[atomtype][0]))
return output_layer
frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32)
frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)
complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32)
frag1_atomtype_energy = []
frag2_atomtype_energy = []
complex_atomtype_energy = []
for ind, atomtype in enumerate(atom_types):
frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)
cond = tf.equal(frag1_z, atomtype)
frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
cond = tf.equal(frag2_z, atomtype)
frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
cond = tf.equal(complex_z, atomtype)
complex_atomtype_energy.append(
tf.where(cond, complex_outputs, complex_zeros))
frag1_outputs = tf.add_n(frag1_atomtype_energy)
frag2_outputs = tf.add_n(frag2_atomtype_energy)
complex_outputs = tf.add_n(complex_atomtype_energy)
frag1_energy = tf.reduce_sum(frag1_outputs, 1)
frag2_energy = tf.reduce_sum(frag2_outputs, 1)
complex_energy = tf.reduce_sum(complex_outputs, 1)
binding_energy = complex_energy - (frag1_energy + frag2_energy)
return tf.expand_dims(binding_energy, axis=1)
class AtomicConvModel(KerasModel):
"""Implements an Atomic Convolution Model.
Implements the atomic convolutional networks as introduced in
<NAME> al. "Atomic convolutional networks for predicting protein-ligand binding affinity." arXiv preprint arXiv:1703.10603 (2017).
The atomic convolutional networks function as a variant of
graph convolutions. The difference is that the "graph" here is
the nearest neighbors graph in 3D space. The AtomicConvModel
leverages these connections in 3D space to train models that
learn to predict energetic state starting from the spatial
geometry of the model.
"""
def __init__(self,
frag1_num_atoms=70,
frag2_num_atoms=634,
complex_num_atoms=701,
max_num_neighbors=12,
batch_size=24,
atom_types=[
6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,
53., -1.
],
radial=[[
1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,
7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
], [0.0, 4.0, 8.0], [0.4]],
layer_sizes=[32, 32, 16],
learning_rate=0.001,
**kwargs):
"""
Parameters
----------
frag1_num_atoms: int
Number of atoms in first fragment
frag2_num_atoms: int
      Number of atoms in second fragment
max_num_neighbors: int
Maximum number of neighbors possible for an atom. Recall neighbors
are spatial neighbors.
atom_types: list
List of atoms recognized by model. Atoms are indicated by their
nuclear numbers.
radial: list
TODO: add description
layer_sizes: list
TODO: add description
learning_rate: float
Learning rate for the model.
"""
# TODO: Turning off queue for now. Safe to re-activate?
self.complex_num_atoms = complex_num_atoms
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.max_num_neighbors = max_num_neighbors
self.batch_size = batch_size
self.atom_types = atom_types
rp = [x for x in itertools.product(*radial)]
frag1_X = Input(shape=(frag1_num_atoms, 3))
frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_z = Input(shape=(frag1_num_atoms,))
frag2_X = Input(shape=(frag2_num_atoms, 3))
frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_z = Input(shape=(frag2_num_atoms,))
complex_X = Input(shape=(complex_num_atoms, 3))
complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_z = Input(shape=(complex_num_atoms,))
self._frag1_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z])
self._frag2_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z])
self._complex_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z])
score = AtomicConvScore(self.atom_types, layer_sizes)([
self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z,
frag2_z, complex_z
])
model = tf.keras.Model(
inputs=[
frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs,
frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z,
complex_z
],
outputs=score)
super(AtomicConvModel, self).__init__(
model, L2Loss(), batch_size=batch_size, **kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
batch_size = self.batch_size
def replace_atom_types(z):
def place_holder(i):
if i in self.atom_types:
return i
return -1
return np.array([place_holder(x) for x in z])
for epoch in range(epochs):
for ind, (F_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(
batch_size, deterministic=True, pad_batches=pad_batches)):
N = self.complex_num_atoms
N_1 = self.frag1_num_atoms
N_2 = self.frag2_num_atoms
M = self.max_num_neighbors
batch_size = F_b.shape[0]
num_features = F_b[0][0].shape[1]
frag1_X_b = np.zeros((batch_size, N_1, num_features))
for i in range(batch_size):
frag1_X_b[i] = F_b[i][0]
frag2_X_b = np.zeros((batch_size, N_2, num_features))
for i in range(batch_size):
frag2_X_b[i] = F_b[i][3]
complex_X_b = np.zeros((batch_size, N, num_features))
for i in range(batch_size):
complex_X_b[i] = F_b[i][6]
frag1_Nbrs = np.zeros((batch_size, N_1, M))
frag1_Z_b = np.zeros((batch_size, N_1))
for i in range(batch_size):
z = replace_atom_types(F_b[i][2])
frag1_Z_b[i] = z
frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
for atom in range(N_1):
for i in range(batch_size):
atom_nbrs = F_b[i][1].get(atom, "")
frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]
frag2_Nbrs = np.zeros((batch_size, N_2, M))
frag2_Z_b = np.zeros((batch_size, N_2))
for i in range(batch_size):
z = replace_atom_types(F_b[i][5])
frag2_Z_b[i] = z
frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
for atom in range(N_2):
for i in range(batch_size):
atom_nbrs = F_b[i][4].get(atom, "")
frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]
complex_Nbrs = np.zeros((batch_size, N, M))
complex_Z_b = np.zeros((batch_size, N))
for i in range(batch_size):
z = replace_atom_types(F_b[i][8])
complex_Z_b[i] = z
complex_Nbrs_Z = np.zeros((batch_size, N, M))
for atom in range(N):
for i in range(batch_size):
atom_nbrs = F_b[i][7].get(atom, "")
complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]
inputs = [
frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b,
frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b, complex_Nbrs,
complex_Nbrs_Z, complex_Z_b
]
y_b = np.reshape(y_b, newshape=(batch_size, 1))
yield (inputs, [y_b], [w_b])
| 2.265625 | 2 |
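A minimal construction sketch for AtomicConvModel above; the training dataset is assumed to be featurized with DeepChem's atomic-convolution featurizer (not shown), so the fit call is left commented.
# Hypothetical instantiation with the default fragment/neighbor sizes.
model = AtomicConvModel(frag1_num_atoms=70, frag2_num_atoms=634,
                        complex_num_atoms=701, max_num_neighbors=12,
                        batch_size=24, learning_rate=0.001)
# model.fit(train_dataset, nb_epoch=10)  # train_dataset: a featurized dc.data.Dataset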
dialogue-engine/test/programytest/config/brain/test_oob.py | cotobadesign/cotoba-agent-oss | 104 | 4415 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.brain.oob import BrainOOBConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class BrainOOBConfigurationTests(unittest.TestCase):
def test_oob_with_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
classname: programy.oob.defaults.default.DefaultOutOfBandProcessor
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertEqual("programy.oob.defaults.default.DefaultOutOfBandProcessor", oob_config.classname)
def test_default_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertIsNone(oob_config.classname)
| 1.765625 | 2 |
pypad/active_skill/interfaces/orb_generator_asi.py | candyninja001/pypad | 0 | 4416 | import abc
from ...orb_attribute import OrbAttribute
# Interface for active skills that create specific orb types (whether board change, orb change, orb spawn, etc)
class OrbGeneratorASI(abc.ABC):
@abc.abstractmethod
def does_orb_generator_create_orb_attribute(self, orb_attribute: OrbAttribute) -> bool:
pass | 2.90625 | 3 |
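A small sketch of a concrete subclass of the interface above; the water-only board change and the OrbAttribute.WATER member are assumptions for illustration.
# Hypothetical concrete implementation of OrbGeneratorASI.
class WaterBoardChangeASI(OrbGeneratorASI):
    def does_orb_generator_create_orb_attribute(self, orb_attribute: OrbAttribute) -> bool:
        # This invented skill floods the board with water orbs only.
        return orb_attribute == OrbAttribute.WATER  # assumes a WATER member exists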
setup.py | DivoK/mystery | 8 | 4417 | <filename>setup.py
"""
Core business logic for `mystery`.
This code will run when the package is being built and installed.
"""
import json
import pathlib
import random
import tempfile
import urllib.request
import typing
import setuptools
from setuptools.command.sdist import sdist
# Load the configuration file.
CONFIG_PATH = pathlib.Path('config.json')
CONFIG = json.load(CONFIG_PATH.open('r'))
def _get_lockfile_path() -> pathlib.Path:
"""
Assemble the lockfile's path.
:return: lockfile path.
:rtype: pathlib.Path
"""
return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name'])
class SDistCommand(sdist):
"""
Will be registered as a replacement for pip's 'sdist' command.
"""
def run(self):
dep_lock_path = _get_lockfile_path()
try:
dep_lock_path.unlink()
except FileNotFoundError:
pass
super().run()
def _get_package_list() -> typing.List[str]:
"""
Get a list of possible packages.
:return: list of package names.
:rtype: typing.List[str]
"""
try:
# Get the top PyPI packages and use one of them.
response = urllib.request.urlopen(CONFIG['top_pypi_packages_link'])
possible_packages_raw = response.read()
except urllib.request.URLError:
# Use the offline backup file.
with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file:
possible_packages_raw = backup_file.read()
return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']]
def _choose_mystery_package() -> str:
"""
Choose the underlying mysterious package and handle the lockfile's state.
:return: mystery package name.
:rtype: str
"""
# To keep the chosen dependency consistent in between setup.py runs, 'mystery' uses a temporary lockfile.
dep_lock_path = _get_lockfile_path()
if dep_lock_path.exists():
# Use the locked package and unlink the lockfile.
chosen_package = dep_lock_path.read_text().strip()
dep_lock_path.unlink()
else:
# Choose a package and create the lockfile.
possible_packages = _get_package_list()
chosen_package = random.choice(
[package['project'] for package in possible_packages]
)
dep_lock_path.write_text(chosen_package) # Lock the chosen package of course.
return chosen_package
def _fix_package_name(package_name: str) -> str:
"""
Fix the package name so it could be placed in the __init__.py file.
:param package_name: mystery package name.
:type package_name: str
:return: fixed mystery package name.
:rtype: str
"""
# Transform to eligible package name.
fixed_package_name = package_name.replace('-', '_')
# Special case for the 'backports' modules.
if fixed_package_name.startswith('backports_'):
        fixed_package_name = fixed_package_name.replace('_', '.', 1)
return fixed_package_name
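# Illustrative sketch (not part of the original module): a tiny self-check of the
# name normalisation above, given the 'backports' special case; the sample
# distribution names are just examples.
def _demo_fix_package_name() -> None:
    assert _fix_package_name('requests') == 'requests'
    assert _fix_package_name('typing-extensions') == 'typing_extensions'
    assert _fix_package_name('backports-functools-lru-cache') == 'backports.functools_lru_cache'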
def _write_init_py(package_name: str) -> None:
"""
Dynamically write the __init__.py for the package using the chosen package.
    :param package_name: mystery package name.
    :type package_name: str
:rtype: None
"""
package_name = _fix_package_name(package_name)
init_py_path = pathlib.Path('mystery')
init_py_path.mkdir(exist_ok=True)
init_py_path = init_py_path / '__init__.py'
init_py_path.write_text(
f'''
# Here we're trying to import the mystery package (it's "{package_name}" this time).
# If it exists, overwrite 'mystery' in 'sys.modules'. Else, print there was an error.
import sys
try:
import {package_name}
except ImportError as error:
print('Internal error:', error)
print("The mystery package wasn't playing nice. Sorry!")
print('Hint: you can always try to reinstall mystery and get a different package!')
sorry = 'try reinstalling mystery and get a different package!'
else:
sys.modules['mystery'] = {package_name}
sys.modules['mystery'].__mystery_init_py__ = __file__
sys.modules['mystery'].__mystery_package_name__ = '{package_name}'
del sys # We care about this only when mystery fails (and even that's inconsequential).
'''
)
def _get_long_description_data() -> typing.Tuple[str, str]:
"""
Get data regarding the long description of the package.
:return: tuple of the README.md text and the long_description type.
:rtype: typing.Tuple[str, str]
"""
with open('README.md', 'r') as readme:
return (readme.read(), 'text/markdown')
CHOSEN_PACKAGE = _choose_mystery_package()
_write_init_py(CHOSEN_PACKAGE)
LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data()
setuptools.setup(
name='mystery',
version='1.0.2',
description='It is a riddle, wrapped in a mystery, inside an enigma.',
url='https://github.com/DivoK/mystery',
author='<NAME>',
author_email='<EMAIL>',
packages=setuptools.find_packages(),
install_requires=[CHOSEN_PACKAGE],
cmdclass={'sdist': SDistCommand},
python_requires='>=3.6',
include_package_data=True,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
keywords='mystery setuptools fun python-packages random',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Other Audience',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 2.5 | 2 |
ADMM_primal.py | CrazyIvanPro/Optimal_Transport | 2 | 4418 | <reponame>CrazyIvanPro/Optimal_Transport<filename>ADMM_primal.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# File Name: ADMM_primal.py
# Purpose : implementation for ADMM method
# for solving primal problem
# =======================================
from utils import get_params
import numpy as np
import sys
def ADMM_primal(mu, nu, c, iters=10000, rho=1024, alpha=1.618):
"""ADMM_primal
"""
# initialize
m, n = c.shape
pi = np.zeros((m, n))
pi_dag = np.zeros((m, n))
w = np.zeros((m, n))
u = np.zeros(m)
v = np.zeros(n)
rho_tilde = rho * 32
while rho_tilde >= rho:
for _ in range(iters):
r = ((-w + u.reshape((m, 1)) + v.reshape((1, n)) - c) / rho +
mu.reshape((m, 1)) + nu.reshape((1, n)) + pi_dag)
pi = (r - ((r.sum(axis=1) - r.sum() / (m + n + 1)) / (n + 1)).reshape((m, 1))
- ((r.sum(axis=0) - r.sum() / (m + n + 1)) / (m + 1)).reshape((1, n)))
pi_dag = np.maximum(pi + w / rho, 0.0)
u = u + alpha * rho * (mu - pi.sum(axis=1))
v = v + alpha * rho * (nu - pi.sum(axis=0))
w = w + alpha * rho * (pi - pi_dag)
rho_tilde = rho_tilde / 2
print('error_mu = %.5e' % np.linalg.norm(pi_dag.sum(axis = 1) - mu, 1))
print('error_nu = %.5e' % np.linalg.norm(pi_dag.sum(axis = 0) - nu, 1))
    print('fval = %.5e' % (c * pi_dag).sum())
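# Illustrative usage sketch (not part of the original script): a tiny hand-built
# transport problem solved without utils.get_params; the marginals and cost matrix
# below are made up for demonstration only.
def _demo_small_instance():
    m, n = 4, 5
    mu = np.ones(m) / m                          # uniform source marginal
    nu = np.ones(n) / n                          # uniform target marginal
    c = np.abs(np.arange(m)[:, None] - np.arange(n)[None, :]).astype(float)  # |i - j| cost
    ADMM_primal(mu, nu, c, iters=500)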
if __name__ == '__main__':
try:
print("Test...")
_mu, _nu, _c = get_params(64, 'random')
ADMM_primal(_mu, _nu, _c)
except KeyboardInterrupt:
print (" Ctrl+C pressed...")
sys.exit(1)
| 2.546875 | 3 |
misc_scripts/CleanVCFparams.py | pombase/legacy-eg-loader | 0 | 4419 | <reponame>pombase/legacy-eg-loader
#!/usr/bin/python
import os
import sys
import pprint
import argparse
parser = argparse.ArgumentParser(description='Clean up the data for a given parameter')
parser.add_argument('--infile', help="Path to the VCF file", default='test.vcf')
parser.add_argument('--outfile', help="Path to the new VCF file", default='test.out.vcf')
parser.add_argument('--param', help="Parameter to clean", default='PL')
args = parser.parse_args()
fi = open(args.infile, 'r')
#fo = open('Spombe.2013-01-02.filt3c.nr57-final.snps.anno-snpeff3.cleaned3.AB325691.vcf', 'w')
fo = open(args.outfile, 'w')
for line in fi:
if len(line) == 0:
continue
if line[0] == '#':
fo.write(line)
continue
line = line.rstrip()
v = line.split('\t');
params = v[8].split(':')
out = v[0:8]
try:
paramIndex = params.index(args.param)
del params[paramIndex]
out.append(':'.join(params))
for d in v[9:]:
dv = d.split(':')
del dv[paramIndex]
out.append(':'.join(dv))
except ValueError:
out.append(':'.join(params))
out += v[9:]
fo.write("\t".join(out) + "\n")
fi.close()
fo.close()
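# Illustrative example (not part of the original script), assuming --param PL:
# a FORMAT column "GT:PL:DP" with sample data "0/1:35,0,40:12" is rewritten as
# "GT:DP" and "0/1:12", i.e. the PL entry is removed from the FORMAT definition
# and from every per-sample column; all other columns pass through unchanged.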
| 2.40625 | 2 |
create_coherency_dataset.py | UKPLab/acl20-dialogue-coherence-assessment | 12 | 4420 | import math
import os
from copy import deepcopy
from ast import literal_eval
import pandas as pd
from math import factorial
import random
from collections import Counter, defaultdict
import sys
from nltk import word_tokenize
from tqdm import tqdm, trange
import argparse
import numpy as np
import re
import csv
from sklearn.model_selection import train_test_split
from swda.swda import CorpusReader, Transcript, Utterance
act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"}
def permute(sents, sent_DAs, amount):
""" return a list of different! permuted sentences and their respective dialog acts """
""" if amount is greater than the possible amount of permutations, only the uniquely possible ones are returned """
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
if amount == 0:
return []
permutations = [list(range(len(sents)))]
amount = min(amount, factorial(len(sents))-1)
for i in range(amount):
permutation = np.random.permutation(len(sents))
while permutation.tolist() in permutations:
permutation = np.random.permutation(len(sents))
permutations.append(permutation.tolist())
    return permutations[1:]  # drop index 0: it is the original order, included above only so it never gets regenerated
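# Illustrative sketch (not part of the original script): permute() returns index
# orderings rather than reordered sentences; the toy inputs below are made up.
def _demo_permute():
    sents = [['hello'], ['hi', 'there'], ['bye']]
    dialog_acts = [1, 1, 4]
    orderings = permute(sents, dialog_acts, 2)
    # e.g. [[2, 0, 1], [1, 2, 0]]; apply one with [sents[i] for i in orderings[0]]
    return orderings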
def draw_rand_sent(act_utt_df, sent_len, amount):
""" df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance),
with act being a number from 1 to 4 and utt being a sentence """
permutations = []
for _ in range(amount):
(utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)
sent_insert_ix = random.randint(0, sent_len-1)
permutations.append((utt, da, name, ix, sent_insert_ix))
return permutations
def draw_rand_sent_from_df(df):
ix = random.randint(0, len(df['utt'])-1)
return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix]
def half_perturb(sents, sent_DAs, amount):
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
permutations = [list(range(len(sents)))]
for _ in range(amount):
while True:
speaker = random.randint(0,1) # choose one of the speakers
speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(len(sents))))
permuted_speaker_ix = np.random.permutation(speaker_ix)
new_sents = list(range(len(sents)))
for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix):
new_sents[i_to] = i_from
if (not new_sents == permutations[0]) and (
not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))):
permutations.append(new_sents)
break
return permutations[1:]
def utterance_insertions(length, amount):
possible_permutations = []
original = list(range(length))
for ix in original:
for y in range(length):
if ix == y: continue
ix_removed = original[0:ix] + ([] if ix == length-1 else original[ix+1:])
ix_removed.insert(y, ix)
possible_permutations.append(deepcopy(ix_removed))
permutations = []
for _ in range(amount):
i = random.randint(0, len(possible_permutations)-1)
permutations.append(possible_permutations[i])
return permutations
class DailyDialogConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):
self.data_dir = data_dir
self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')
self.tokenizer = tokenizer
self.word2id = word2id
self.output_file = None
self.task = task
self.ranking_dataset = ranking_dataset
self.perturbation_statistics = 0
self.setname = os.path.split(data_dir)[1]
assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name"
def create_act_utt(self):
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task))
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(output_file, 'w')
csv_writer = csv.writer(of, delimiter='|')
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):
dialog_name = "{}_{}".format(self.setname, line_count)
row = (act, utt, dialog_name,utt_i)
csv_writer.writerow(row)
def convert_dset(self, amounts):
# data_dir is supposed to be the dir with the respective train/test/val-dataset files
print("Creating {} perturbations for task {}".format(amounts, self.task))
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task))
root_data_dir = os.path.split(self.data_dir)[0]
shuffled_path = os.path.join(root_data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files"
assert os.path.isfile(self.act_utt_file), "missing act_utt.txt in data_dir"
with open(self.act_utt_file, 'r') as f:
act_utt_df = pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix'])
rand_generator = lambda: draw_rand_sent_from_df(act_utt_df)
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(self.output_file, 'w')
discarded = 0
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
discarded += 1
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
if self.task == 'up':
permuted_ixs = permute(tok_seqs, acts, amounts)
elif self.task == 'us':
permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts)
elif self.task == 'hup':
permuted_ixs = half_perturb(tok_seqs, acts, amounts)
elif self.task == 'ui':
permuted_ixs = utterance_insertions(len(tok_seqs), amounts)
shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".format(self.setname, line_count))
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
for perm in permuted_ixs:
if self.task == 'us':
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
csv_writer.writerow(perm)
self.perturbation_statistics += len(permuted_ixs)
if self.task == 'us':
for p in permuted_ixs:
(insert_sent, insert_da, name, ix, insert_ix) = p
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(a) for a in p_a])
p_u = deepcopy(tok_seqs)
p_u[insert_ix] = self.word2id(insert_sent)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
pa = [acts[i] for i in p]
p_a = " ".join([str(a) for a in pa])
pu = [tok_seqs[i] for i in p]
p_u = str(pu)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
print(discarded)
class SwitchboardConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):
self.corpus = CorpusReader(data_dir)
self.data_dir = data_dir
self.tokenizer = tokenizer
self.word2id = word2id
self.task = task
self.utt_num = 0
for utt in self.corpus.iter_utterances():
self.utt_num += 1
self.trans_num = 0
for trans in self.corpus.iter_transcripts():
self.trans_num += 1
self.da2num = switchboard_da_mapping()
# CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same!
train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)
val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)
self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs
self.utt_da_pairs = []
prev_da = "%"
for i, utt in enumerate(self.corpus.iter_utterances()):
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
act = utt.damsl_act_tag()
            if act is None: act = "%"
if act == "+": act = prev_da
_, swda_name = os.path.split(utt.swda_filename)
swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name
ix = utt.utterance_index
self.utt_da_pairs.append((sentence, act, swda_name, ix))
def draw_rand_sent(self):
r = random.randint(0, len(self.utt_da_pairs)-1)
return self.utt_da_pairs[r]
def create_vocab(self):
print("Creating Vocab file for Switchboard")
cnt = Counter()
for utt in self.corpus.iter_utterances():
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.tokenizer(sentence)
for w in sentence:
cnt[w] += 1
itos_file = os.path.join(self.data_dir, "itos.txt")
itosf = open(itos_file, "w")
for (word, _) in cnt.most_common(25000):
itosf.write("{}\n".format(word))
#getKeysByValue
def swda_permute(self, sents, amount, speaker_ixs):
if amount == 0:
return []
permutations = [list(range(len(sents)))]
segment_permutations = []
amount = min(amount, factorial(len(sents))-1)
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
for i in range(amount):
while True:
permutation = []
segm_perm = np.random.permutation(len(segments))
segment_permutations.append(segm_perm)
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
break
permutations.append(permutation)
        return permutations[1:], segment_permutations  # drop index 0: it is the original order, included above only so it never gets regenerated
def speaker_segment_ixs(self, speaker_ixs):
i = 0
segment_indices = dict()
prev_speaker = speaker_ixs[0]
for j,speaker in enumerate(speaker_ixs):
if speaker != prev_speaker:
prev_speaker = speaker
i += 1
segment_indices[j] = i
return segment_indices
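    # Illustrative note (not part of the original class): speaker_segment_ixs maps each
    # utterance index to a segment id, starting a new segment whenever the speaker
    # changes. For speaker_ixs = [0, 0, 1, 1, 1, 0] it returns
    # {0: 0, 1: 0, 2: 1, 3: 1, 4: 1, 5: 2}.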
def swda_half_perturb(self, amount, speaker_ixs):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
segment_permutations = []
permutations = [list(segm_ixs.keys())]
for _ in range(amount):
speaker = random.randint(0,1) # choose one of the speakers
speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))
speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))
#TODO: rename either speaker_ix or speaker_ixs, they are something different, but the names are too close
if len(speaker_to_perm) < 2:
return []
while True:
permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist()
new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix))
if speaker == 0 :
new_segments[::2] = permuted_speaker_ix
new_segments[1::2] = speaker_orig
else:
new_segments[1::2] = permuted_speaker_ix
new_segments[::2] = speaker_orig
segment_permutations.append(new_segments)
permutation = []
for segm_ix in new_segments:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if not permutation in permutations:
permutations.append(permutation)
break
return permutations[1:], segment_permutations
def swda_utterance_insertion(self, speaker_ixs, amounts):
segment_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segment_ixs.values()))
segment_permutations = []
permutations = []
i = 0
for _ in range(amounts):
while True: # actually: do ... while permutation not in permutations
i_from = random.randint(0, len(segments)-1)
i_to = random.randint(0, len(segments)-2)
segm_perm = deepcopy(segments)
rem_elem = segments[i_from]
segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:]
segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]
permutation = []
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
permutations.append(permutation)
segment_permutations.append(segm_perm)
break
return permutations, segment_permutations
def swda_utterance_sampling(self, speaker_ixs, amount):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
permutations = []
for i in range(amount):
(sentence, act, swda_name, ix) = self.draw_rand_sent()
insert_ix = random.choice(segments)
permutations.append((sentence, act, swda_name, ix, insert_ix))
return permutations
def convert_dset(self, amounts):
# create distinct train/validation/test files. they'll correspond to the created
# splits from the constructor
train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task))
val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task))
test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task))
if not os.path.exists(os.path.join(self.data_dir, 'train')):
os.makedirs(os.path.join(self.data_dir, 'train'))
if not os.path.exists(os.path.join(self.data_dir, 'validation')):
os.makedirs(os.path.join(self.data_dir, 'validation'))
if not os.path.exists(os.path.join(self.data_dir, 'test')):
os.makedirs(os.path.join(self.data_dir, 'test'))
trainfile = open(train_output_file, 'w')
valfile = open(val_output_file, 'w')
testfile = open(test_output_file, 'w')
shuffled_path = os.path.join(self.data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):
utterances = []
acts = []
speaker_ixs = []
prev_act = "%"
for utt in trans.utterances:
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
utterances.append(sentence)
act = utt.damsl_act_tag()
                if act is None: act = "%"
if act == "+": act = prev_act
acts.append(self.da2num[act])
prev_act = act
if "A" in utt.caller:
speaker_ixs.append(0)
else:
speaker_ixs.append(1)
if self.task == 'up':
permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)
elif self.task == 'us':
permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)
elif self.task == 'hup':
permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs)
elif self.task == 'ui':
permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)
swda_fname = os.path.split(trans.swda_filename)[1]
shuffle_file = os.path.join(shuffled_path, swda_fname) # [:-4]
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
if self.task == 'us':
for perm in permuted_ixs:
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
for perm in segment_perms:
csv_writer.writerow(perm)
if self.task == 'us':
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
insert_sent, insert_da, name, ix, insert_ix = p
insert_da = self.da2num[insert_da]
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(x) for x in p_a])
p_u = deepcopy(utterances)
p_u[insert_ix] = insert_sent
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
pa = [acts[i] for i in p]
p_a = " ".join([str(x) for x in pa])
pu = [utterances[i] for i in p]
p_u = str(pu)
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--datadir",
required=True,
type=str,
help="""The input directory where the files of the corpus
are located. """)
parser.add_argument("--corpus",
required=True,
type=str,
help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--amount',
type=int,
default=20,
help="random seed for initialization")
parser.add_argument('--word2id',
action='store_true',
help= "convert the words to ids")
parser.add_argument('--task',
required=True,
type=str,
default="up",
help="""for which task the dataset should be created.
alternatives: up (utterance permutation)
us (utterance sampling)
                                              hup (half utterance perturbation)
ui (utterance insertion, nothing directly added!)""")
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
if args.word2id:
f = open(os.path.join(args.datadir, "itos.txt"), "r")
word2id_dict = dict()
for i, word in enumerate(f):
word2id_dict[word[:-1].lower()] = i
        word2id = lambda x: [word2id_dict[y] for y in x]
    else:
        word2id = lambda x: x  # don't convert words to ids (yet); conversion happens in the glove wrapper of mtl_coherence.py
tokenizer = word_tokenize
if args.corpus == 'DailyDialog':
converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
converter.create_act_utt()
elif args.corpus == 'Switchboard':
converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)
converter.create_vocab()
converter.convert_dset(amounts=args.amount)
def getKeysByValue(dictOfElements, valueToFind):
listOfKeys = list()
for item in dictOfElements.items():
if item[1] == valueToFind:
listOfKeys.append(item[0])
return listOfKeys
def switchboard_da_mapping():
mapping_dict = dict({
"sd": 1,
"b": 2,
"sv": 3,
"aa": 4,
"%-": 5,
"ba": 6,
"qy": 7,
"x": 8,
"ny": 9,
"fc": 10,
"%": 11,
"qw": 12,
"nn": 13,
"bk": 14,
"h": 15,
"qy^d": 16,
"o": 17,
"bh": 18,
"^q": 19,
"bf": 20,
"na": 21,
"ny^e": 22,
"ad": 23,
"^2": 24,
"b^m": 25,
"qo": 26,
"qh": 27,
"^h": 28,
"ar": 29,
"ng": 30,
"nn^e": 31,
"br": 32,
"no": 33,
"fp": 34,
"qrr": 35,
"arp": 36,
"nd": 37,
"t3": 38,
"oo": 39,
"co": 40,
"cc": 41,
"t1": 42,
"bd": 43,
"aap": 44,
"am": 45,
"^g": 46,
"qw^d": 47,
"fa": 48,
"ft":49
})
d = defaultdict(lambda: 11)
for (k, v) in mapping_dict.items():
d[k] = v
return d
if __name__ == "__main__":
main()
| 2.578125 | 3 |
tests/utils/test_clean_accounting_column.py | richardqiu/pyjanitor | 2 | 4421 | import pytest
from janitor.utils import _clean_accounting_column
@pytest.mark.utils
def test_clean_accounting_column():
test_str = "(1,000)"
assert _clean_accounting_column(test_str) == float(-1000)
@pytest.mark.utils
def test_clean_accounting_column_zeroes():
test_str = "()"
assert _clean_accounting_column(test_str) == 0.00
| 2.5 | 2 |
downloadParagraph.py | icadot86/bert | 0 | 4422 | # coding=utf-8
import sys, getopt
import urllib
import requests
import requests_cache
import re
import time
from bs4 import BeautifulSoup
from requests import Session
sys.path.append("/home/taejoon1kim/BERT/my_bert")
from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath
from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON
def preprocessor(text):
if "감독" in text:
return text[0:text.find("감독")]
elif "등장인물" in text:
return text[0:text.find("등장인물")]
elif "누구야" in text:
return text[0:text.find("누구야")]
elif "알려줘" in text:
return text[0:text.find("알려줘")]
elif "보여줘" in text:
return text[0:text.find("보여줘")]
elif "찾아줘" in text:
return text[0:text.find("찾아줘")]
elif "언제야" in text:
return text[0:text.find("언제")]
elif "어디" in text:
return text[0:text.find("어디")]
elif "뭐야" in text:
return text[0:text.find("뭐야")]
else :
return text
def checkQType(text):
global Q_TYPE
if "감독" in text or "어디서" in text or "언제" in text or "뭐야" in text:
Q_TYPE = 2
elif "누구야" in text:
Q_TYPE = 1
else:
Q_TYPE = 3
SEARCH_RESULT['Q_TYPE'] = Q_TYPE
print("QUESTION TYPE : ", Q_TYPE)
WIKI_URL = "wikipedia.org"
YOUTUBE_URL = "youtube.com/channel"
NO_RESULT = "no_result"
SEARCH_RESULT = {
"WIKI" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"FIRST" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"YOUTUBE" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"test_input.json" : f"{NO_RESULT}",
"search_result.json" : f"{NO_RESULT}",
"Q_TYPE" : f"{NO_RESULT}"
}
def downloadURL(URL):
# desktop user-agent
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0"
# mobile user-agent
MOBILE_USER_AGENT = "Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36"
headers = {"user-agent" : USER_AGENT}
#headers = {"user-agent" : USER_AGENT, "cache-contorl" : "public,max-age=3600"}
#headers = {"user-agent" : USER_AGENT, "cache-contorl" : "no-cache"}
#s = Session()
#s.headers.update(headers)
resp = requests.get(URL, headers=headers)
#resp = s.get(URL)
results = [{"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"}]
print(resp.status_code)
if resp.status_code == 200:
soup = BeautifulSoup(resp.content, "lxml")
results = []
for g in soup.find_all('div', class_='r'):
anchors = g.find_all('a')
if anchors:
link = anchors[0]['href']
title = g.find('h3').text
item = {
"title": title,
"link": link
}
results.append(item)
#print(link)
global SEARCH_RESULT
if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT:
SEARCH_RESULT['WIKI']['title'] = title
SEARCH_RESULT['WIKI']['link'] = link
elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT:
SEARCH_RESULT['YOUTUBE']['title'] = title
SEARCH_RESULT['YOUTUBE']['link'] = link
if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT:
break
SEARCH_RESULT['FIRST']['title'] = results[0].get('title')
SEARCH_RESULT['FIRST']['link'] = results[0].get('link')
else:
SEARCH_RESULT['FIRST']['title'] = f"resp.status_code {resp.status_code}"
return results
def download(text):
global cache
cache = getDownloadCachePath(text)
global start, Q_TYPE
init_start = time.time()
start = time.time()
requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache')
#if cacheExist(cache) == False:
if True:
checkQType(text)
query_text = preprocessor(text)
## 1st SEARCH
query = query_text
query = query.replace(' ', '+')
if Q_TYPE <= 2:
URL = f"https://google.com/search?q={query} site:wikipedia.org"
else :
URL = f"https://google.com/search?q={query}"
print(URL)
downloadURL(URL)
printTime("1st Search Time")
pWithoutTag = f"{NO_RESULT}"
imgTag = f"{NO_RESULT}"
## 2nd SEARCH
if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2:
URL = f"https://google.com/search?q={query} site:wikipedia.org"
downloadURL(URL)
if SEARCH_RESULT['WIKI']['title'] == NO_RESULT:
pWithoutTag = "위키피디아가 없네요. 링크를 열어보세요"
else:
resp = requests.get(SEARCH_RESULT['WIKI']['link'])
if resp.status_code == 200:
soup = BeautifulSoup(resp.content, "lxml")
p = soup.find('p')
pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip()
pWithoutTag = re.sub('"', '', str(pWithoutTag), 0).strip()
pWithoutTag = re.sub('\n', ' ', str(pWithoutTag), 0).strip()
imgTag = "http:" + soup.find('a', {'class':'image'}).find('img')['src']
## GENERATE BERT INPUT
JSON_1 = "{\"version\":\"mytest_dev\",\"data\":[{\"paragraphs\":[{\"qas\":[{\"answers\":[{\"text\":\"테스트\",\"answer_start\":0}],\"id\":\"1-1\",\"question\":\"테스트\"}],\"context\":\""
JSON_2 = "\"}],\"title\":\"테스트\"}]}"
FULL_JSON = JSON_1 + pWithoutTag + JSON_2
writeJson(FULL_JSON, BERT_INPUT_JSON)
printTime("2nd Search Time")
SEARCH_RESULT['test_input.json'] = FULL_JSON
## GENERATE SEARCH RESULT
FULL_JSON = "{\"google\":[{\"title\":\"" + SEARCH_RESULT['FIRST']['title'] + "\",\"link\":\"" + SEARCH_RESULT['FIRST']['link'] + "\"}],\"wiki\":[{\"title\":\"" + SEARCH_RESULT['WIKI']['title'] + "\",\"link\":\"" + SEARCH_RESULT['WIKI']['link'] + "\"}],\"youtube\":[{\"title\":\"" + SEARCH_RESULT['YOUTUBE']['title'] + "\",\"link\":\"" + SEARCH_RESULT['YOUTUBE']['link'] + "\"}],\"Q_TYPE\":\"" + str(Q_TYPE) + "\",\"IMG_SRC\":\"" + str(imgTag) + "\"}"
writeJson(FULL_JSON, BERT_SEARCH_JSON)
SEARCH_RESULT['search_result.json'] = FULL_JSON
writeCache(cache, SEARCH_RESULT)
else:
CACHE_RESULT = readCache(cache)
writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON)
writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON)
Q_TYPE = CACHE_RESULT['Q_TYPE']
print(f"[SEARCH] Total time : {format(time.time() - init_start, '0.5f')}")
return Q_TYPE
def writeJson(json, filePath):
f = open(filePath, 'w')
f.write(json)
f.close()
def printTime(text):
global start
print(f"[SEARCH] {text} : {format(time.time() - start, '0.5f')}")
start = time.time()
def main(argv):
download(argv[1])
if __name__ == "__main__":
main(sys.argv)
| 2.578125 | 3 |
data_io.py | LucasChenLC/courseManager2 | 0 | 4423 | from xml.dom.minidom import Document, parse
class InfoBatch:
def __init__(self, title, pre_node_titles):
self.title = title
self.pre_node_titles = pre_node_titles
def save_data_xml(course_list, file_path):
doc = Document()
courses = doc.createElement('course_list')
doc.appendChild(courses)
for course in course_list:
single_course = doc.createElement('course')
courses.appendChild(single_course)
single_course_name = doc.createElement('course_name')
course_name = doc.createTextNode(course.name)
single_course.appendChild(single_course_name)
single_course_name.appendChild(course_name)
pre_course = doc.createElement('pre_course')
pre_course_name = ','.join(course.pre_course)
course_name = doc.createTextNode(pre_course_name)
single_course.appendChild(pre_course)
pre_course.appendChild(course_name)
after_course = doc.createElement('after_course')
after_course_name = ','.join(course.after_course)
course_name = doc.createTextNode(after_course_name)
single_course.appendChild(after_course)
after_course.appendChild(course_name)
with open(file_path, 'wb+') as f:
f.write(doc.toprettyxml(indent='\t', encoding='utf-8'))
def load_data_xml(file_path):
info_list = []
doc = parse(file_path)
courses = doc.getElementsByTagName("course")
for course in courses:
title = course.getElementsByTagName("course_name")[0].childNodes[0].data
try:
pre_node_titles = course.getElementsByTagName("pre_node_titles")[0].childNodes[0].data
pre_node_titles = pre_node_titles.split(',')
info_list.append(InfoBatch(title, pre_node_titles))
except IndexError:
info_list.append(InfoBatch(title, []))
return info_list
'''
course_list = []
course_list.append(Course('Advance Math'))
course_list.append(Course('Linear Algebra'))
course_list.append(Course('Procedure Oriented Programming'))
course_list.append(Course('Object Oriented Programming'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('College Physics'))
course_list[-1].add_pre_course(course_list, ['Advance Math'])
course_list.append(Course('Digital Logic'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('Computer Organization'))
course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic'])
course_list.append(Course('Computer Architecture'))
course_list[-1].add_pre_course(course_list,
['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization'])
save_data_xml(course_list, 'resource/data/data.xml')
'''
| 2.96875 | 3 |
tests/rules/test_git_rm_local_modifications.py | jlandrum/theheck | 0 | 4424 | import pytest
from theheck.rules.git_rm_local_modifications import match, get_new_command
from theheck.types import Command
@pytest.fixture
def output(target):
return ('error: the following file has local modifications:\n {}\n(use '
'--cached to keep the file, or -f to force removal)').format(target)
@pytest.mark.parametrize('script, target', [
('git rm foo', 'foo'),
('git rm foo bar', 'bar')])
def test_match(output, script, target):
assert match(Command(script, output))
@pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar', 'git rm'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, target, new_command', [
('git rm foo', 'foo', ['git rm --cached foo', 'git rm -f foo']),
('git rm foo bar', 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])])
def test_get_new_command(output, script, target, new_command):
assert get_new_command(Command(script, output)) == new_command
| 2.234375 | 2 |
application.py | statisticsnorway/microdata-data-service | 0 | 4425 | <reponame>statisticsnorway/microdata-data-service
import logging
import json_logging
import tomlkit
import uvicorn
from fastapi import FastAPI, status
from fastapi.encoders import jsonable_encoder
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from starlette.responses import PlainTextResponse, Response
from data_service.api.data_api import data_router
from data_service.api.observability_api import observability_router
from data_service.config import config
from data_service.core.processor import NotFoundException
from data_service.core.filters import EmptyResultSetException
"""
Self-hosting JavaScript and CSS for docs
https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs
"""
data_service_app = FastAPI(docs_url=None, redoc_url=None)
data_service_app.mount("/static", StaticFiles(directory="static"), name="static")
data_service_app.include_router(data_router)
data_service_app.include_router(observability_router)
@data_service_app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
return get_swagger_ui_html(
openapi_url=data_service_app.openapi_url,
title=data_service_app.title + " - Swagger UI",
oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url,
swagger_js_url="/static/swagger-ui-bundle.js",
swagger_css_url="/static/swagger-ui.css",
)
@data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
async def swagger_ui_redirect():
return get_swagger_ui_oauth2_redirect_html()
@data_service_app.get("/redoc", include_in_schema=False)
async def redoc_html():
return get_redoc_html(
openapi_url=data_service_app.openapi_url,
title=data_service_app.title + " - ReDoc",
redoc_js_url="/static/redoc.standalone.js",
)
def _get_project_meta():
with open('./pyproject.toml') as pyproject:
file_contents = pyproject.read()
return tomlkit.parse(file_contents)['tool']['poetry']
pkg_meta = _get_project_meta()
class CustomJSONLog(json_logging.JSONLogFormatter):
"""
Customized application logger
"""
def _format_log_object(self, record, request_util):
json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util)
json_log_object.update({
"message": record.getMessage()
})
if "exc_info" in json_log_object:
json_log_object["error.stack"] = json_log_object.pop('exc_info')
del json_log_object['filename']
json_log_object["@timestamp"] = json_log_object.pop('written_at')
json_log_object["loggerName"] = json_log_object.pop('logger')
json_log_object["levelName"] = json_log_object.pop('level')
json_log_object["schemaVersion"] = "v3"
json_log_object["serviceVersion"] = str(pkg_meta['version'])
json_log_object["serviceName"] = "data-service"
del json_log_object['written_ts']
del json_log_object['type']
del json_log_object['msg']
del json_log_object['module']
del json_log_object['line_no']
return json_log_object
class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter):
"""
Customized request logger
"""
def _format_log_object(self, record, request_util):
json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util)
json_log_object.update({
"message": record.getMessage()
})
json_log_object["@timestamp"] = json_log_object.pop('written_at')
json_log_object["xRequestId"] = json_log_object.pop('correlation_id')
json_log_object["url"] = json_log_object.pop('request')
json_log_object["source_host"] = json_log_object.pop('remote_host')
json_log_object["responseTime"] = json_log_object.pop('response_time_ms')
json_log_object["statusCode"] = json_log_object.pop('response_status')
del json_log_object['written_ts']
del json_log_object['type']
del json_log_object['remote_user']
del json_log_object['referer']
del json_log_object['x_forwarded_for']
del json_log_object['protocol']
del json_log_object['remote_ip']
del json_log_object['request_size_b']
del json_log_object['remote_port']
del json_log_object['request_received_at']
del json_log_object['response_size_b']
del json_log_object['response_content_type']
del json_log_object['response_sent_at']
return json_log_object
@data_service_app.exception_handler(EmptyResultSetException)
async def empty_result_set_exception_handler(request, exc):
log = logging.getLogger(__name__)
log.exception(exc)
return Response(
status_code=status.HTTP_204_NO_CONTENT
)
@data_service_app.exception_handler(NotFoundException)
async def not_found_exception_handler(request, exc):
log = logging.getLogger(__name__)
log.exception(exc)
return JSONResponse(
status_code=status.HTTP_404_NOT_FOUND,
content=jsonable_encoder({"detail": "No such datastructure"})
)
@data_service_app.exception_handler(Exception)
async def unknown_exception_handler(request, exc):
log = logging.getLogger(__name__)
log.exception(exc)
return PlainTextResponse("Internal Server Error", status_code=500)
@data_service_app.on_event("startup")
def startup_event():
json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog)
json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter)
logging.basicConfig(level=logging.INFO)
json_logging.config_root_logger()
log = logging.getLogger(__name__)
log.info('Started data-service')
log.info(config.get_settings().print())
if __name__ == "__main__":
uvicorn.run(data_service_app, host="0.0.0.0", port=8000)
| 2.125 | 2 |
graspologic/embed/n2v.py | dtborders/graspologic | 0 | 4426 | <gh_stars>0
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import logging
import math
import time
from typing import Any, List, Optional, Tuple, Union
import networkx as nx
import numpy as np
from ..utils import remap_node_ids
def node2vec_embed(
graph: Union[nx.Graph, nx.DiGraph],
num_walks: int = 10,
walk_length: int = 80,
return_hyperparameter: float = 1.0,
inout_hyperparameter: float = 1.0,
dimensions: int = 128,
window_size: int = 10,
workers: int = 8,
iterations: int = 1,
interpolate_walk_lengths_by_node_degree: bool = True,
random_seed: Optional[int] = None,
) -> Tuple[np.array, List[Any]]:
"""
Generates a node2vec embedding from a given graph. Will follow the word2vec algorithm to create the embedding.
Parameters
----------
graph: Union[nx.Graph, nx.DiGraph]
A networkx graph or digraph. A multigraph should be turned into a non-multigraph so that the calling user
properly handles the multi-edges (i.e. aggregate weights or take last edge weight).
If the graph is unweighted, the weight of each edge will default to 1.
num_walks : int
Number of walks per source. Default is 10.
walk_length: int
Length of walk per source. Default is 80.
return_hyperparameter : float
Return hyperparameter (p). Default is 1.0
inout_hyperparameter : float
Inout hyperparameter (q). Default is 1.0
dimensions : int
Dimensionality of the word vectors. Default is 128.
window_size : int
Maximum distance between the current and predicted word within a sentence. Default is 10.
workers : int
Use these many worker threads to train the model. Default is 8.
iterations : int
Number of epochs in stochastic gradient descent (SGD)
interpolate_walk_lengths_by_node_degree : bool
Use a dynamic walk length that corresponds to each nodes
degree. If the node is in the bottom 20 percentile, default to a walk length of 1. If it is in the top 10
percentile, use ``walk_length``. If it is in the 20-80 percentiles, linearly interpolate between 1 and ``walk_length``.
This will reduce lower degree nodes from biasing your resulting embedding. If a low degree node has the same
number of walks as a high degree node (which it will if this setting is not on), then the lower degree nodes
will take a smaller breadth of random walks when compared to the high degree nodes. This will result in your
lower degree walks dominating your higher degree nodes.
random_seed : int
Seed to be used for reproducible results. Default is None and will produce a random output. Note that for a fully
deterministically-reproducible run, you must also limit to a single worker thread (`workers=1`), to eliminate
ordering jitter from OS thread scheduling. In addition the environment variable ``PYTHONHASHSEED`` must be set
to control hash randomization.
Returns
-------
Tuple[np.array, List[Any]]
A tuple containing a matrix, with each row index corresponding to the embedding for each node. The tuple
also contains a vector containing the corresponding vertex labels for each row in the matrix.
The matrix and vector are positionally correlated.
Notes
-----
The original reference implementation of node2vec comes from Aditya Grover from
https://github.com/aditya-grover/node2vec/.
Further details on the Alias Method used in this functionality can be found at
https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
References
----------
.. [1] <NAME> and <NAME> "node2vec: Scalable Feature Learning for Networks."
Knowledge Discovery and Data Mining, 2016.
"""
_preconditions(
graph,
num_walks,
walk_length,
return_hyperparameter,
inout_hyperparameter,
dimensions,
window_size,
workers,
iterations,
interpolate_walk_lengths_by_node_degree,
)
random_state = np.random.RandomState(seed=random_seed)
node2vec_graph = _Node2VecGraph(
graph, return_hyperparameter, inout_hyperparameter, random_state
)
logging.info(
f"Starting preprocessing of transition probabilities on graph with {str(len(graph.nodes()))} nodes and "
f"{str(len(graph.edges()))} edges"
)
start = time.time()
logging.info(f"Starting at time {str(start)}")
node2vec_graph._preprocess_transition_probabilities()
logging.info(f"Simulating walks on graph at time {str(time.time())}")
walks = node2vec_graph._simulate_walks(
num_walks, walk_length, interpolate_walk_lengths_by_node_degree
)
logging.info(f"Learning embeddings at time {str(time.time())}")
model = _learn_embeddings(
walks, dimensions, window_size, workers, iterations, random_seed
)
end = time.time()
logging.info(
f"Completed. Ending time is {str(end)} Elapsed time is {str(start - end)}"
)
labels = node2vec_graph.original_graph.nodes()
remapped_labels = node2vec_graph.label_map_to_string
return (
np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]),
labels,
)
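# Illustrative usage sketch (not part of the original module): embed a small
# undirected graph; the barbell graph and hyperparameter values are arbitrary.
def _example_node2vec_usage():
    graph = nx.barbell_graph(10, 3)
    embedding, labels = node2vec_embed(
        graph,
        num_walks=5,
        walk_length=20,
        dimensions=32,
        random_seed=1234,
    )
    # `embedding` has one row per node, positionally aligned with `labels`.
    return embedding, labels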
def _assert_is_positive_int(name: str, value: int):
if not isinstance(value, int):
raise TypeError(f"{name} must be an int")
if value <= 0:
raise ValueError(f"{name} must be > 0")
def _assert_is_nonnegative_float(name: str, value: float):
if not isinstance(value, float):
raise TypeError(f"{name} must be a float")
if value < 0.0:
raise ValueError(f"{name} must be >= 0.0")
def _preconditions(
graph: Union[nx.Graph, nx.DiGraph],
num_walks: int,
walk_length: int,
return_hyperparameter: float,
inout_hyperparameter: float,
dimensions: int,
window_size: int,
workers: int,
iterations: int,
interpolate_walk_lengths_by_node_degree: bool,
):
if not isinstance(graph, nx.Graph):
raise TypeError("graph must be a networkx Graph or DiGraph")
if graph.is_multigraph():
raise ValueError(
"This function does not work on multigraphs - because there are two reasonable ways to treat a "
"multigraph with different behaviors, we insist that the caller create an appropriate Graph or "
"DiGraph that represents the manner in which they'd like the multigraph to be treated for the "
"purposes of this embedding"
)
_assert_is_positive_int("num_walks", num_walks)
_assert_is_positive_int("walk_length", walk_length)
_assert_is_nonnegative_float("return_hyperparameter", return_hyperparameter)
_assert_is_nonnegative_float("inout_hyperparameter", inout_hyperparameter)
_assert_is_positive_int("dimensions", dimensions)
_assert_is_positive_int("window_size", window_size)
_assert_is_positive_int("workers", workers)
_assert_is_positive_int("iterations", iterations)
if not isinstance(interpolate_walk_lengths_by_node_degree, bool):
raise TypeError("interpolate_walk_lengths_by_node_degree must be a bool")
def _learn_embeddings(
walks: List[Any],
dimensions: int,
window_size: int,
workers: int,
iterations: int,
random_seed: Optional[int],
):
"""
Learn embeddings by optimizing the skip-gram objective using SGD.
"""
from gensim.models import Word2Vec
walks = [list(map(str, walk)) for walk in walks]
# Documentation - https://radimrehurek.com/gensim/models/word2vec.html
model = Word2Vec(
walks,
size=dimensions,
window=window_size,
min_count=0,
sg=1, # Training algorithm: 1 for skip-gram; otherwise CBOW
workers=workers,
iter=iterations,
seed=random_seed,
)
return model
class _Node2VecGraph:
"""
Temporary inner state object for constructing the random walks
Parameters
----------
graph: nx.Graph
A networkx graph
return_hyperparameter : float
Return hyperparameter
inout_hyperparameter : float
Inout hyperparameter
random_state : np.random.RandomState
Random State for reproducible results. Default is None and will produce random
results
"""
def __init__(
self,
graph: nx.Graph,
return_hyperparameter: float,
inout_hyperparameter: float,
random_state: Optional[np.random.RandomState] = None,
):
self.original_graph: nx.Graph = graph
graph_with_new_ids, new_id_map = remap_node_ids(graph=graph)
self.graph = graph_with_new_ids
self.label_map_to_string = new_id_map
self.is_directed = self.graph.is_directed()
self.p = return_hyperparameter
self.q = inout_hyperparameter
self.random_state = random_state
def node2vec_walk(
self,
walk_length: int,
start_node: Any,
degree_percentiles: Optional[np.ndarray],
):
"""
Simulate a random walk starting from start node.
"""
graph = self.graph
alias_nodes = self.alias_nodes
alias_edges = self.alias_edges
walk = [start_node]
# Percentiles will be provided if we are using the 'interpolate_walk_lengths_by_node_degree' feature.
# the intent of the code is to default the bottom 20% of to a minimal walk length, default the top 10% to a
# maximum walk length, and interpolate the inner 70% linearly from min to max.
# This is to avoid having your random walks be dominated by low degree nodes. If the low degree nodes have the
# same number of walks as the high degree nodes, the low degree nodes will take a smaller breadth of paths
# (due to their being less nodes to choose from) and will bias your resulting Word2Vec embedding
if degree_percentiles is not None:
degree = nx.degree(graph, start_node)
walk_length = self._get_walk_length_interpolated(
degree, degree_percentiles, walk_length
)
while len(walk) < walk_length:
current = walk[-1]
current_neighbors = sorted(graph.neighbors(current))
if len(current_neighbors) > 0:
if len(walk) == 1:
walk.append(
current_neighbors[
_alias_draw(
alias_nodes[current][0],
alias_nodes[current][1],
self.random_state,
)
]
)
else:
prev = walk[-2]
next = current_neighbors[
_alias_draw(
alias_edges[(prev, current)][0],
alias_edges[(prev, current)][1],
self.random_state,
)
]
walk.append(next)
else:
break
return walk
@staticmethod
def _get_walk_length_interpolated(
degree: int, percentiles: list, max_walk_length: int
):
"""
Given a node's degree, determine the length of a walk that should be used. If the degree is less than the
first element of the percentiles list, default the walk length to 1. Otherwise, if the degree is greater
than the last element of the list, default it to the max_walk_length. If it falls in the middle, do a linear
interpolation to decide the length of the walk.
"""
new_walk_length = None
for i, percentile in enumerate(percentiles):
# if we are below the first percentile in the list, default to a walk length of 1
if i == 0 and degree < percentile:
return 1
# otherwise, find which bucket we are going to be in.
if degree <= percentile:
new_walk_length = max_walk_length * ((i * 0.1) + 0.2)
break
# the degree is above the last percentile
if not new_walk_length:
new_walk_length = max_walk_length
# a walk length of 0 is invalid but can happen depending on the percentiles used
if new_walk_length < 1:
new_walk_length = 1
return math.floor(new_walk_length)
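    # Illustrative worked example (not part of the original class): with hypothetical
    # degree percentiles [2, 3, 4, 6, 9, 15, 30] and max_walk_length=80, a node of
    # degree 1 falls below the 20th percentile and gets a walk length of 1, a node of
    # degree 40 sits above the 80th percentile and gets the full 80, and a node of
    # degree 5 lands in the fourth bucket: floor(80 * (3 * 0.1 + 0.2)) = 40.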
def _simulate_walks(
self,
num_walks: int,
walk_length: int,
interpolate_walk_lengths_by_node_degree: bool = False,
):
"""
Repeatedly simulate random walks from each node.
"""
graph = self.graph
walks = []
nodes = list(graph.nodes())
degree_percentiles: Optional[np.ndarray] = None
if interpolate_walk_lengths_by_node_degree:
degree_percentiles = np.percentile(
[degree for _, degree in graph.degree()], [x for x in range(20, 90, 10)]
)
for walk_iteration in range(num_walks):
logging.info(
"Walk iteration: " + str(walk_iteration + 1) + "/" + str(num_walks)
)
self.random_state.shuffle(nodes)
for node in nodes:
walks.append(
self.node2vec_walk(
walk_length=walk_length,
start_node=node,
degree_percentiles=degree_percentiles,
)
)
return walks
def _get_alias_edge(self, source: Any, destination: Any):
"""
Get the alias edge setup lists for a given edge.
"""
graph = self.graph
p = self.p
q = self.q
unnormalized_probs = []
for destination_neighbor in sorted(graph.neighbors(destination)):
if destination_neighbor == source:
unnormalized_probs.append(
graph[destination][destination_neighbor].get("weight", 1) / p
)
elif graph.has_edge(destination_neighbor, source):
unnormalized_probs.append(
graph[destination][destination_neighbor].get("weight", 1)
)
else:
unnormalized_probs.append(
graph[destination][destination_neighbor].get("weight", 1) / q
)
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]
return _alias_setup(normalized_probs)
def _preprocess_transition_probabilities(self, weight_default: float = 1.0):
"""
Preprocessing of transition probabilities for guiding the random walks.
"""
graph = self.graph
is_directed = self.is_directed
alias_nodes = {}
total_nodes = len(graph.nodes())
bucket = 0
current_node = 0
quotient = int(total_nodes / 10)
logging.info(
f"Beginning preprocessing of transition probabilities for {total_nodes} vertices"
)
for node in graph.nodes():
current_node += 1
if current_node > bucket * quotient:
bucket += 1
logging.info(f"Completed {current_node} / {total_nodes} vertices")
unnormalized_probs = [
graph[node][nbr].get("weight", weight_default)
for nbr in sorted(graph.neighbors(node))
]
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob) / norm_const for u_prob in unnormalized_probs
]
alias_nodes[node] = _alias_setup(normalized_probs)
logging.info(
f"Completed preprocessing of transition probabilities for vertices"
)
alias_edges = {}
total_edges = len(graph.edges())
bucket = 0
current_edge = 0
quotient = int(total_edges / 10)
logging.info(
f"Beginning preprocessing of transition probabilities for {total_edges} edges"
)
if is_directed:
for edge in graph.edges():
current_edge += 1
if current_edge > bucket * quotient:
bucket += 1
logging.info(f"Completed {current_edge} / {total_edges} edges")
alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
else:
for edge in graph.edges():
current_edge += 1
if current_edge > bucket * quotient:
bucket += 1
logging.info(f"Completed {current_edge} / {total_edges} edges")
alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0])
logging.info(f"Completed preprocessing of transition probabilities for edges")
self.alias_nodes = alias_nodes
self.alias_edges = alias_edges
return
def _alias_setup(probabilities: List[float]):
"""
Compute utility lists for non-uniform sampling from discrete distributions.
Refer to
https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
for details
"""
number_of_outcomes = len(probabilities)
alias = np.zeros(number_of_outcomes)
sampled_probabilities = np.zeros(number_of_outcomes, dtype=int)
smaller = []
larger = []
for i, prob in enumerate(probabilities):
alias[i] = number_of_outcomes * prob
if alias[i] < 1.0:
smaller.append(i)
else:
larger.append(i)
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
sampled_probabilities[small] = large
alias[large] = alias[large] + alias[small] - 1.0
if alias[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
return sampled_probabilities, alias
def _alias_draw(
    probabilities: List[float], alias: List[float], random_state: np.random.RandomState
):
    """
    Draw sample from a non-uniform discrete distribution using alias sampling.
    ``probabilities`` is the alias index table (J) and ``alias`` is the scaled
    probability table (q), i.e. the two arrays returned, in that order, by ``_alias_setup``.
    """
    number_of_outcomes = len(probabilities)
    random_index = int(np.floor(random_state.rand() * number_of_outcomes))
    if random_state.rand() < alias[random_index]:
        return random_index
    else:
        return probabilities[random_index]
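if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): draw repeatedly from a toy
    # discrete distribution and check that the empirical frequencies track the weights.
    # The weights below are illustrative only.
    _rs = np.random.RandomState(42)
    _J, _q = _alias_setup([0.1, 0.2, 0.7])  # (alias index table, probability table)
    _draws = [_alias_draw(_J, _q, _rs) for _ in range(10000)]
    print([_draws.count(i) / len(_draws) for i in range(3)])  # roughly [0.1, 0.2, 0.7]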
| 2.984375 | 3 |
bot.py | NotBlizzard/blizzybot | 0 | 4427 | <gh_stars>0
# bot.py
# TODO:
# organize imports
# organize
from websocket import create_connection
from threading import Thread
from battle import Battle
import commands
import traceback
import requests
import inspect
import json
from fractions import Fraction
import random
import time
import sys
import re
import os
from learn import Learn
class Bot:
pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), "./data/pokedex.json"), "r").read())
pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), "./data/pokemon_teams.json"), "r").read())
def __init__(self, username, password, server, admins, rooms, symbol, avatar, plugins, log):
self.start_time = float(time.time())
self.commands = []
self.last_message = {}
self.i = 0
self.url = "http://play.pokemonshowdown.com/action.php"
self.room = ""
self.username = username
self.password = password
self.joined_all_rooms = False
self.avatar = avatar
self.server = server
self.admins = admins
self.rooms = rooms
self.symbol = symbol
self.battles = []
self.plugins = plugins
self.rooms_joined = []
self.log = log
self.tiers = ["randombattle", "ou", "ubers", "uu", "ru", "nu", "pu", "lc", "anythinggoes", "battlespotsingles"]
def __str__(self):
return "<Bot:{}>".format(self.username)
def join(self, room):
self.ws.send("|/join {}".format(room))
def current_battle(self):
return [i for i in self.battles if i.room == self.room][0]
def battle(self, message):
        message[1] = re.sub(r'[^A-Za-z0-9]', '', message[1])
        if message[1] == "turn" or message[1] == "start":
            # current_battle() already returns the Battle object for the current room
            getattr(self.current_battle(), "decide")()
        else:
            getattr(self.current_battle(), message[1])(message)
def plugin(self, room, plugin, message):
self.ws.send("{}|{}".format(room, plugin.run(message, self.last_message[self.room])))
def command(self, message, room, user):
cmd = message[4].split(self.symbol)[1].split(" ")[0]
try:
if " " in message[4]:
args = message[4].split("{} ".format(cmd))[1]
else:
args = []
command = getattr(commands, "command_{}".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self)
self.ws.send("{}|{}".format(room, command))
except (IndexError, TypeError):
print(traceback.print_exc())
self.ws.send("{}|Luffy: so it's a mystery command! (\"{}\" is not recognized)".format(room, cmd))
except:
print(traceback.print_exc())
self.ws.send("{}|Something went wrong.".format(room))
def login(self, message):
key = message[2]
challenge = message[3]
if self.password == "":
data = { "act": "getassertion", "userid": self.username, "challengekeyid": key, "challenge": challenge }
data = requests.get(self.url, data=data)
self.ws.send("|/trn {},0,{}".format(self.username, data.text))
else:
data = { "act": "login", "name": self.username, "pass": self.password, "challengekeyid": key, "challenge": challenge }
data = requests.post(self.url, data=data)
data = json.loads(data.text.split("]")[1])
self.ws.send("|/trn {},0,{}".format(self.username, data["assertion"]))
def disconnect(self):
self.ws = None
sys.exit()
def start(self):
try:
self.connect()
except SystemExit:
return sys.exit()
def message(self, messages):
timestamp = int(messages[2])
user = messages[3]
print(self.room)
print(self.rooms_joined)
match_line = [x for x in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)]
if len(match_line) > 0 and self.room in self.rooms_joined:
plugin = [x for x in self.plugins if x == match_line[0]][0]
if self.room == "lobby":
self.room = ""
self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start())
if self.room in self.rooms_joined and messages[4][0] == self.symbol:
if self.room == "lobby":
self.room = ""
self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start())
def battle_message(self, messages):
        user = re.sub(r'[^A-Za-z0-9]', '', messages[2])
if messages[3][0] == self.symbol:
messages = [""] + messages # now the list has five elements.
self.commands.append(Thread(target=self.command, args=(messages, self.room, " " + user)).start())
def raw(self, messages):
if self.rooms[self.i] not in self.rooms_joined and "infobox" in messages[2]:
if self.rooms[self.i] == "lobby":
self.rooms[self.i] = ""
self.rooms_joined.append(self.rooms[self.i])
if len(self.rooms) > self.i + 1:
self.i += 1
def update(self):
[self.join(room) for room in self.rooms]
def request(self, messages):
data = [x for x in self.battles if self.room in str(x)]
        battle_tier = re.search(r"battle-(.+)-(\d+)", self.room).group(1)
if len(data) == 0: # new battle
self.battles.append(Battle(battle_tier, self.room, self))
print("NEW BATTLE")
self.battles[-1].run(messages)
else:
pass
def update_battle(self, messages):
data = json.loads(messages[2])
if len(data["challengesFrom"].keys()) > 0:
who = list(data["challengesFrom"].keys())[0]
tier = data["challengesFrom"][who]
if tier in self.tiers:
if "random" not in tier:
team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))]
self.ws.send("|/utm {}".format(team))
self.ws.send("|/accept {}".format(who))
def connect(self):
self.ws = create_connection("ws://{}/showdown/websocket".format(self.server))
while True:
messages = [x for x in self.ws.recv().split("\n")]
for message in messages:
print("it is ")
print(self.rooms_joined)
if self.log:
print(message.encode("utf-8", "ignore"))
                try:
                    # Lines prefixed with ">" carry the room id for the messages that follow
                    if message.startswith(">"):
                        self.room = message[1:]
                except:
                    self.room = ""  # lobby
message = message.split("|")
# battles
if self.room in [x.room for x in self.battles] and len(message) > 1:
battle = [i for i in self.battles if i.room == self.room][0]
battle.run(message)
if len(message) > 1:
if message[1] == "c:":
self.message(message)
self.last_message[self.room] = message
elif message[1] == "title":
room = re.sub(r' ', '', message[2].lower())
self.rooms_joined.append(room)
elif message[1] == "raw":
self.raw(message)
elif message[1] == "c":
self.battle_message(message)
elif message[1] == "challstr":
self.login(message)
elif message[1] == "updateuser":
if not self.joined_all_rooms:
for room in self.rooms:
self.join(room)
self.joined_all_rooms = True
elif message[1] == "request":
self.request(message)
elif message[1] == "updatechallenges":
self.update_battle(message)
else:
pass
| 2.421875 | 2 |
stRT/tdr/widgets/changes.py | Yao-14/stAnalysis | 0 | 4428 | from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
import pyvista as pv
from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .ddrtree import DDRTree, cal_ncenter
from .slice import euclidean_distance, three_d_slice
####################################
# Changes along a vector direction #
####################################
def changes_along_line(
model: Union[PolyData, UnstructuredGrid],
key: Union[str, list] = None,
n_points: int = 100,
vec: Union[tuple, list] = (1, 0, 0),
center: Union[tuple, list] = None,
) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]:
slices, line_points, line = three_d_slice(
model=model, method="line", n_slices=n_points, vec=vec, center=center
)
x, y = [], []
x_length = 0
    for single_slice, (point_i, point) in zip(slices, enumerate(line_points)):
        change_value = np.asarray(single_slice[key]).sum()
y.append(change_value)
if point_i == 0:
x.append(0)
else:
point1 = line_points[point_i - 1].points.flatten()
point2 = line_points[point_i].points.flatten()
ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3)
x_length += ed
x.append(x_length)
return np.asarray(x), np.asarray(y), slices, line
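def _example_changes_along_line():  # pragma: no cover - illustrative only
    """
    Hypothetical usage sketch (not part of the original module): the toy sphere and the
    "expr" point-data key are made up purely to show the expected inputs and outputs.
    """
    model = pv.Sphere()
    model.point_data["expr"] = np.random.default_rng(0).random(model.n_points)
    x, y, slices, line = changes_along_line(model, key="expr", n_points=20, vec=(1, 0, 0))
    return x, y  # cumulative distance along the line and the summed "expr" per slice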
#################################
# Changes along the model shape #
#################################
def changes_along_shape(
model: Union[PolyData, UnstructuredGrid],
spatial_key: Optional[str] = None,
key_added: Optional[str] = "rd_spatial",
dim: int = 2,
inplace: bool = False,
**kwargs,
):
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
DDRTree_kwargs = {
"maxIter": 10,
"sigma": 0.001,
"gamma": 10,
"eps": 0,
"dim": dim,
"Lambda": 5 * X.shape[1],
"ncenter": cal_ncenter(X.shape[1]),
}
DDRTree_kwargs.update(kwargs)
Z, Y, stree, R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs)
# Obtain the real part of the complex argument
model[key_added] = np.real(W).astype(np.float64)
return model if not inplace else None
##############################
# Changes along the branches #
##############################
def ElPiGraph_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a principal elastic tree.
Reference: Albergante et al. (2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph.
Args:
X: DxN, data matrix list.
NumNodes: The number of nodes of the principal graph. Use a range of 10 to 100 for ElPiGraph approach.
**kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For details, please see:
https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import elpigraph
except ImportError:
raise ImportError(
"You need to install the package `elpigraph-python`."
"\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`."
)
ElPiGraph_kwargs = {
"alpha": 0.01,
"FinalEnergy": "Penalized",
"StoreGraphEvolution": True,
"GPU": False,
}
ElPiGraph_kwargs.update(kwargs)
if ElPiGraph_kwargs["GPU"] is True:
try:
import cupy
except ImportError:
raise ImportError(
"You need to install the package `cupy`."
"\nInstall cupy via `pip install cupy-cuda113`."
)
elpi_tree = elpigraph.computeElasticPrincipalTree(
X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs
)
nodes = elpi_tree[0]["NodePositions"] # ['AllNodePositions'][k]
matrix_edges_weights = elpi_tree[0]["ElasticMatrix"] # ['AllElasticMatrices'][k]
matrix_edges_weights = np.triu(matrix_edges_weights, 1)
edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose()
return nodes, edges
def SimplePPT_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a simple principal tree.
Reference: Mao et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference on Data Mining.
Args:
X: DxN, data matrix list.
NumNodes: The number of nodes of the principal graph. Use a range of 100 to 2000 for PPT approach.
**kwargs: Other parameters used in simpleppt.ppt. For details, please see:
https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import igraph
import simpleppt
except ImportError:
raise ImportError(
"You need to install the package `simpleppt` and `igraph`."
"\nInstall simpleppt via `pip install -U simpleppt`."
"\nInstall igraph via `pip install -U igraph`"
)
SimplePPT_kwargs = {
"seed": 1,
"lam": 10,
}
SimplePPT_kwargs.update(kwargs)
X = np.asarray(X)
ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs)
R = ppt_tree.R
nodes = (np.dot(X.T, R) / R.sum(axis=0)).T
B = ppt_tree.B
edges = np.array(
igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected").get_edgelist()
)
return nodes, edges
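def _example_principal_tree():  # pragma: no cover - illustrative only
    """
    Hypothetical usage sketch (not part of the original module): both backends expose the
    same (X, NumNodes) interface, so they can be swapped freely. The random point cloud is
    illustrative, and elpigraph-python / simpleppt must be installed for the chosen call.
    """
    X = np.random.default_rng(0).normal(size=(200, 3))
    nodes, edges = ElPiGraph_tree(X, NumNodes=30)  # or: SimplePPT_tree(X, NumNodes=300)
    return nodes.shape, edges.shape  # (n_nodes, 3) and (n_edges, 2)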
def map_points_to_branch(
model: Union[PolyData, UnstructuredGrid],
nodes: np.ndarray,
spatial_key: Optional[str] = None,
key_added: Optional[str] = "nodes",
inplace: bool = False,
**kwargs,
):
"""
Find the closest principal tree node to any point in the model through KDTree.
Args:
model: A reconstruct model.
nodes: The nodes in the principal tree.
spatial_key: The key that corresponds to the coordinates of the point in the model. If spatial_key is None,
the coordinates are model.points.
key_added: The key under which to add the nodes labels.
inplace: Updates model in-place.
kwargs: Other parameters used in scipy.spatial.KDTree.
Returns:
A model, which contains the following properties:
`model.point_data[key_added]`, the nodes labels array.
"""
from scipy.spatial import KDTree
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
nodes_kdtree = KDTree(np.asarray(nodes), **kwargs)
_, ii = nodes_kdtree.query(np.asarray(X), k=1)
model.point_data[key_added] = ii
return model if not inplace else None
def map_gene_to_branch(
model: Union[PolyData, UnstructuredGrid],
tree: PolyData,
key: Union[str, list],
nodes_key: Optional[str] = "nodes",
inplace: bool = False,
):
"""
Find the closest principal tree node to any point in the model through KDTree.
Args:
model: A reconstruct model contains the gene expression label.
tree: A three-dims principal tree model contains the nodes label.
key: The key that corresponds to the gene expression.
nodes_key: The key that corresponds to the coordinates of the nodes in the tree.
inplace: Updates tree model in-place.
Returns:
A tree, which contains the following properties:
`tree.point_data[key]`, the gene expression array.
"""
model = model.copy()
model_data = pd.DataFrame(model[nodes_key], columns=["nodes_id"])
key = [key] if isinstance(key, str) else key
for sub_key in key:
model_data[sub_key] = np.asarray(model[sub_key])
model_data = model_data.groupby(by="nodes_id").sum()
model_data["nodes_id"] = model_data.index
model_data.index = range(len(model_data.index))
tree = tree.copy() if not inplace else tree
tree_data = pd.DataFrame(tree[nodes_key], columns=["nodes_id"])
tree_data = pd.merge(tree_data, model_data, how="outer", on="nodes_id")
tree_data.fillna(value=0, inplace=True)
for sub_key in key:
tree.point_data[sub_key] = tree_data[sub_key].values
return tree if not inplace else None
def construct_tree_model(
nodes: np.ndarray,
edges: np.ndarray,
key_added: Optional[str] = "nodes",
) -> PolyData:
"""
Construct a principal tree model.
Args:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
key_added: The key under which to add the nodes labels.
Returns:
A three-dims principal tree model, which contains the following properties:
`tree_model.point_data[key_added]`, the nodes labels array.
"""
    padding = np.full(edges.shape[0], 2, dtype=int)
edges_w_padding = np.vstack((padding, edges.T)).T
tree_model = pv.PolyData(nodes, edges_w_padding)
tree_model.point_data[key_added] = np.arange(0, len(nodes), 1)
return tree_model
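def _example_construct_tree_model():  # pragma: no cover - illustrative only
    """
    Hypothetical usage sketch (not part of the original module): a three-node path graph
    with hard-coded coordinates, only meant to show the expected nodes/edges layout.
    """
    nodes = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
    edges = np.array([[0, 1], [1, 2]])
    tree = construct_tree_model(nodes=nodes, edges=edges)
    return tree  # tree.point_data["nodes"] is [0, 1, 2]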
def changes_along_branch(
model: Union[PolyData, UnstructuredGrid],
spatial_key: Optional[str] = None,
map_key: Union[str, list] = None,
key_added: Optional[str] = "nodes",
rd_method: Literal["ElPiGraph", "SimplePPT"] = "ElPiGraph",
NumNodes: int = 50,
inplace: bool = False,
**kwargs,
) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]:
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
if rd_method == "ElPiGraph":
nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs)
elif rd_method == "SimplePPT":
nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs)
else:
raise ValueError(
"`rd_method` value is wrong."
"\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`."
)
map_points_to_branch(
model=model,
nodes=nodes,
spatial_key=spatial_key,
key_added=key_added,
inplace=True,
)
tree_model = construct_tree_model(nodes=nodes, edges=edges)
if not (map_key is None):
map_gene_to_branch(
model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True
)
    return (model if not inplace else None), tree_model
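if __name__ == "__main__":
    # Minimal end-to-end sketch (not part of the original module): infer a principal tree
    # for a random 3D point cloud and map a synthetic "expr" signal onto it. Requires
    # elpigraph-python to be installed; all names and sizes here are illustrative.
    _rng = np.random.default_rng(0)
    _cloud = pv.PolyData(_rng.normal(size=(300, 3)))
    _cloud.point_data["expr"] = _rng.random(300)
    _labelled, _tree = changes_along_branch(_cloud, map_key="expr", NumNodes=20)
    print(_labelled.point_data["nodes"][:5], _tree.n_points)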
| 2.21875 | 2 |
test/test_add_group.py | nkoshkina/Python_Training3 | 0 | 4429 | <filename>test/test_add_group.py<gh_stars>0
# -*- coding: utf-8 -*-
from model.group import Group
import pytest
import allure_pytest
def test_add_group(app, db, check_ui, json_groups):
group0 = json_groups
#with pytest.allure.step("Given a group list"):
old_groups = db.get_group_list()
#with pytest.allure.step("When I add a group %s to the list" % group0):
app.group.create(group0)
#assert app.group.count() == len(old_groups) + 1
#with pytest.allure.step("When the new groups list is equal old list with added group"):
new_groups = db.get_group_list()
old_groups.append(group0)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
print("CHECK_UI")
assert sorted(new_groups, key=Group.id_or_max) == \
sorted(app.group.get_groups_list(), key=Group.id_or_max)
| 2.578125 | 3 |
cyberbrain/frame_tree.py | testinggg-art/Cyberbrain | 0 | 4430 | from __future__ import annotations
from .frame import Frame
from .generated.communication_pb2 import CursorPosition
class FrameTree:
"""A tree to store all frames. For now it's a fake implementation.
Each node in the tree represents a frame that ever exists during program execution.
Caller and callee frames are connected. Call order is preserved among callee frames
of the same caller frame.
Nodes are also indexed by frames' physical location (file name, line range).
TODO:
- Add indexes.
- Implement frame search.
"""
# Keyed by frame ID.
frames: dict[str, Frame] = dict()
@classmethod
def add_frame(cls, frame_id, frame: Frame):
cls.frames[frame_id] = frame
print(frame_id, frame)
@classmethod
def find_frames(cls, position: CursorPosition) -> list[Frame]:
"""
Right now it's a fake implementation, where we return the only existing frame.
"""
assert cls.frames
return [next(iter(cls.frames.values()))]
@classmethod
def get_frame(cls, frame_id) -> Frame:
assert cls.frames
return cls.frames[frame_id]
| 3.25 | 3 |
src/otp_yubikey/models.py | moggers87/django-otp-yubikey | 0 | 4431 | from __future__ import absolute_import, division, print_function, unicode_literals
from base64 import b64decode
from binascii import hexlify, unhexlify
from struct import pack
import six
from django.db import models
from django.utils.encoding import force_text
from django_otp.models import Device
from django_otp.util import hex_validator, random_hex
from yubiotp.client import YubiClient10, YubiClient11, YubiClient20
from yubiotp.modhex import modhex
from yubiotp.otp import decode_otp
def default_id():
return force_text(random_hex(6))
def id_validator(value):
return hex_validator(6)(value)
def default_key():
return force_text(random_hex(16))
def key_validator(value):
return hex_validator(16)(value)
class YubikeyDevice(Device):
"""
Represents a locally-verified YubiKey OTP
:class:`~django_otp.models.Device`.
.. attribute:: private_id
*CharField*: The 6-byte private ID (hex-encoded).
.. attribute:: key
*CharField*: The 16-byte AES key shared with this YubiKey
(hex-encoded).
.. attribute:: session
*PositiveIntegerField*: The non-volatile session counter most recently
used by this device.
.. attribute:: counter
*PositiveIntegerField*: The volatile session usage counter most
recently used by this device.
"""
private_id = models.CharField(
max_length=12,
validators=[id_validator],
default=default_id,
verbose_name="Private ID",
help_text="The 6-byte private ID (hex-encoded)."
)
key = models.CharField(
max_length=32,
validators=[key_validator],
default=default_key,
help_text="The 16-byte AES key shared with this YubiKey (hex-encoded)."
)
session = models.PositiveIntegerField(
default=0,
help_text="The non-volatile session counter most recently used by this device."
)
counter = models.PositiveIntegerField(
default=0,
help_text="The volatile session usage counter most recently used by this device."
)
class Meta(Device.Meta):
verbose_name = "Local YubiKey device"
def public_id(self):
"""
The public ID of this device is the four-byte, big-endian,
modhex-encoded primary key.
"""
return modhex(pack('>I', self.id))
public_id.short_description = 'Public Identity'
public_id.admin_order_field = 'id'
@property
def bin_key(self):
return unhexlify(self.key.encode())
def verify_token(self, token):
if isinstance(token, six.text_type):
token = token.encode('utf-8')
try:
public_id, otp = decode_otp(token, self.bin_key)
except Exception:
return False
if public_id != self.public_id():
return False
if hexlify(otp.uid) != self.private_id.encode():
return False
if otp.session < self.session:
return False
if (otp.session == self.session) and (otp.counter <= self.counter):
return False
# All tests pass. Update the counters and return the good news.
self.session = otp.session
self.counter = otp.counter
self.save()
return True
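def _example_local_verification(user):  # pragma: no cover - illustrative only
    """
    Hypothetical usage sketch (not part of the original module): requires a configured
    Django project with django-otp and this app installed; ``user`` is any Django user
    instance and the token below is a placeholder, not a real OTP.
    """
    device = YubikeyDevice.objects.create(user=user, name="default")
    # device.private_id and device.key must be programmed into the YubiKey's AES slot.
    token = "cccccc" + "c" * 38  # placeholder 44-character modhex string
    return device.verify_token(token)  # True only for a fresh, correctly signed OTP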
class ValidationService(models.Model):
"""
Represents a YubiKey validation web service. By default, this will point to
Yubico's official hosted service, which you can customize. You can also
create instances to point at any other service implementing the same
protocol.
.. attribute:: name
*CharField*: The name of this validation service.
.. attribute:: api_id
        *IntegerField*: Your API ID. The server needs this to sign responses.
(Default: 1)
.. attribute:: api_key
*CharField*: Your base64-encoded API key, used to sign requests. This
is optional but strongly recommended. (Default: ``''``)
.. attribute:: base_url
*URLField*: The base URL of the verification service. Defaults to
Yubico's hosted API.
.. attribute:: api_version
*CharField*: The version of the validation API to use: '1.0', '1.1', or
'2.0'. (Default: '2.0')
.. attribute:: use_ssl
*BooleanField*: If ``True``, we'll use the HTTPS versions of the
default URLs. Because :mod:`urllib2` does not verify certificates, this
provides little benefit. (Default: ``False``).
.. attribute:: param_sl
*CharField*: The level of syncing required. See
:class:`~yubiotp.client.YubiClient20`.
.. attribute:: param_timeout
*CharField*: The time to allow for syncing. See
:class:`~yubiotp.client.YubiClient20`.
"""
API_VERSIONS = ['1.0', '1.1', '2.0']
name = models.CharField(
max_length=32,
help_text="The name of this validation service."
)
api_id = models.IntegerField(
default=1,
verbose_name="API ID",
help_text="Your API ID."
)
api_key = models.CharField(
max_length=64,
blank=True,
default='',
verbose_name="API key",
help_text="Your base64-encoded API key."
)
base_url = models.URLField(
blank=True,
default='',
verbose_name="Base URL",
help_text="The base URL of the verification service. Defaults to Yubico's hosted API."
)
api_version = models.CharField(
max_length=8,
choices=list(zip(API_VERSIONS, API_VERSIONS)),
default='2.0',
help_text="The version of the validation api to use."
)
use_ssl = models.BooleanField(
default=False,
verbose_name="Use SSL",
help_text="Use HTTPS API URLs by default?"
)
param_sl = models.CharField(
max_length=16,
blank=True,
default=None,
verbose_name="SL",
help_text="The level of syncing required."
)
param_timeout = models.CharField(
max_length=16,
blank=True,
default=None,
verbose_name="Timeout",
help_text="The time to allow for syncing."
)
class Meta(object):
verbose_name = "YubiKey validation service"
def __unicode__(self):
return self.name
def get_client(self):
api_key = b64decode(self.api_key.encode()) or None
if self.api_version == '2.0':
client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout or None)
elif self.api_version == '1.1':
client = YubiClient11(self.api_id, api_key, self.use_ssl)
else:
client = YubiClient10(self.api_id, api_key, self.use_ssl)
if self.base_url:
client.base_url = self.base_url
return client
class RemoteYubikeyDevice(Device):
"""
Represents a YubiKey device that is to be verified with a remote validation
service. In order create these devices, you must have at least one
:class:`~otp_yubikey.models.ValidationService` in the database.
.. attribute:: service
*ForeignKey*: The validation service to use for this device.
.. attribute:: public_id
*CharField*: The public identity of the YubiKey (modhex-encoded).
"""
service = models.ForeignKey(ValidationService, on_delete=models.CASCADE)
public_id = models.CharField(max_length=32, verbose_name="Public ID", help_text="The public identity of the YubiKey (modhex-encoded).")
class Meta(Device.Meta):
verbose_name = "Remote YubiKey device"
def verify_token(self, token):
verified = False
if token[:-32] == self.public_id:
client = self.service.get_client()
response = client.verify(token)
verified = response.is_ok()
return verified
| 2.140625 | 2 |
v1/hsvfilter.py | gavinIRL/RHBot | 0 | 4432 | import typing
# custom data structure to hold the state of an HSV filter
class HsvFilter:
def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None,
sAdd=None, sSub=None, vAdd=None, vSub=None):
self.hMin = hMin
self.sMin = sMin
self.vMin = vMin
self.hMax = hMax
self.sMax = sMax
self.vMax = vMax
self.sAdd = sAdd
self.sSub = sSub
self.vAdd = vAdd
self.vSub = vSub
# Putting this here out of the way as it's a chonk
# For a given item string case it will return the optimal filter and the correct position to look
def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]:
if object_name is None:
#print("Using default filter")
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [3, 32, 1280, 794]
if object_name == "dungeon_check":
return HsvFilter(0, 73, 94, 106, 255, 255, 0, 0, 0, 0), [1083, 295, 1188, 368]
if object_name == "enemy_map_loc":
#print("Using enemy location filter")
if kwargs.get("big_map"):
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
if object_name == "player_map_loc":
if kwargs.get("big_map"):
return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0, 0), [485, 280, 900, 734]
return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0, 0), [1100, 50, 1260, 210]
if object_name == "other_player_map_loc":
if kwargs.get("big_map"):
return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70, 37), [485, 280, 900, 734]
return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70, 37), [1100, 50, 1260, 210]
if object_name == "loot_distant":
return HsvFilter(14, 116, 33, 32, 210, 59, 16, 0, 3, 0), [10, 145, 1084, 684]
if object_name == "loot_near":
return HsvFilter(0, 155, 135, 31, 240, 217, 0, 0, 0, 0), [460, 420, 855, 710]
if object_name == "prompt_press_x_pickup":
return HsvFilter(78, 110, 110, 97, 189, 255, 0, 0, 0, 0), [1080, 660, 1255, 725]
if object_name == "message_section_cleared":
return HsvFilter(0, 0, 214, 179, 65, 255, 0, 0, 0, 17), [464, 600, 855, 680]
if object_name == "message_go":
return HsvFilter(32, 114, 89, 58, 255, 255, 0, 12, 0, 0), [600, 222, 700, 275]
if object_name == "enemy_nametag":
return HsvFilter(49, 0, 139, 91, 30, 197, 0, 0, 40, 38), [10, 145, 1084, 684]
if object_name == "message_boss_encounter":
return HsvFilter(0, 92, 128, 13, 255, 255, 0, 0, 0, 0), [630, 520, 1120, 680]
if object_name == "display_boss_name_and_healthbar":
return HsvFilter(0, 92, 123, 29, 255, 255, 0, 0, 0, 20), [415, 533, 888, 700]
if object_name == "loot_chest_normal":
# This is a difficult one to separate
return HsvFilter(0, 34, 38, 28, 152, 124, 0, 0, 5, 12), [10, 145, 1084, 684]
if object_name == "map_outline":
if kwargs.get("big_map"):
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
if object_name == "gate_map_pos":
# This is a very difficult one to separate
if kwargs.get("big_map"):
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
if object_name == "prompt_move_reward_screen":
return HsvFilter(72, 98, 92, 105, 255, 225, 0, 54, 24, 38)
if object_name == "prompt_select_card":
return HsvFilter(79, 149, 140, 255, 255, 255, 0, 0, 0, 0)
if object_name == "event_chest_special_appear":
return HsvFilter(0, 124, 62, 88, 217, 246, 0, 0, 0, 0)
if object_name == "inventory_green_item":
return HsvFilter(37, 147, 0, 61, 255, 255, 0, 0, 0, 0)
if object_name == "inventory_blue_item":
return HsvFilter(79, 169, 0, 109, 246, 188, 0, 0, 0, 0)
if object_name == "inventory_yellow_item":
# This is a dangerous one as it can barely
# distinguish against green items and vice versa
return HsvFilter(19, 91, 107, 31, 168, 181, 0, 11, 32, 21)
if object_name == "inventory_purple_item":
return HsvFilter(126, 153, 0, 255, 255, 255, 0, 0, 0, 0)
if object_name == "button_repair":
return None, [208, 600]
# These are all To be done later
if object_name == "event_card_trade":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0)
if object_name == "event_otherworld":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0)
if object_name == "loot_chest_special":
if kwargs.get("big_map"):
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
if object_name == "cards":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [735, 32, 1085, 100]
if object_name == "enemy_arrow":
return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
# Buttons for clicking, known positions
if object_name == "button_explore_again":
return None, []
if object_name == "button_choose_map":
return None, []
if object_name == "button_open_store":
return None, []
if object_name == "button_go_town":
return None, []
if object_name == "button_inv_equipment":
return None, []
if object_name == "button_inv_consume":
return None, []
if object_name == "button_inv_other":
return None, []
if object_name == "button_repair_confirm":
return None, []
if object_name == "inv_grid_location":
return None, [533+44*kwargs.get("col"), 277+44*kwargs.get("row")]
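if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): look up the preset filter and
    # screen region for the minimap enemy blips; the preset name is one defined above.
    enemy_filter, region = grab_object_preset("enemy_map_loc", big_map=False)
    print(enemy_filter.hMin, enemy_filter.sMin, enemy_filter.vMax, region)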
| 2.953125 | 3 |
glue/core/tests/test_state_objects.py | HPLegion/glue | 0 | 4433 | <reponame>HPLegion/glue
import numpy as np
from numpy.testing import assert_allclose
from echo import CallbackProperty, ListCallbackProperty
from glue.core import Data, DataCollection
from .test_state import clone
from ..state_objects import (State, StateAttributeLimitsHelper,
StateAttributeSingleValueHelper,
StateAttributeHistogramHelper)
class SimpleTestState(State):
a = CallbackProperty()
b = CallbackProperty()
flat = ListCallbackProperty()
nested = ListCallbackProperty()
def test_state_serialization():
state1 = SimpleTestState()
state1.a = 2
state1.b = 'hello'
state1.flat = [1, 3, 4]
sub_state = SimpleTestState()
sub_state.a = 3
sub_state.b = 'blah'
sub_state.flat = [1, 2]
sub_state.nested = []
state1.nested = [1, 3, sub_state]
state2 = clone(state1)
assert state2.a == 2
assert state2.b == 'hello'
assert state2.flat == [1, 3, 4]
assert state2.nested[0:2] == [1, 3]
assert state2.nested[2].a == 3
assert state2.nested[2].b == 'blah'
assert state2.nested[2].flat == [1, 2]
assert state2.nested[2].nested == []
EXPECTED_STR = """
a: 2
b: hello
flat: <CallbackList with 3 elements>
nested: <CallbackList with 3 elements>
"""
EXPECTED_REPR = """
<SimpleTestState
a: 2
b: hello
flat: <CallbackList with 3 elements>
nested: <CallbackList with 3 elements>
>
"""
def test_state_str_repr():
state1 = SimpleTestState()
state1.a = 2
state1.b = 'hello'
state1.flat = [1, 3, 4]
sub_state = SimpleTestState()
state1.nested = [1, 3, sub_state]
assert str(state1) == EXPECTED_STR.strip()
assert repr(state1) == EXPECTED_REPR.strip()
class TestStateAttributeLimitsHelper():
def setup_method(self, method):
self.data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
log = CallbackProperty(False)
scale = CallbackProperty(100)
self.state = SimpleState()
self.helper = StateAttributeLimitsHelper(self.state, attribute='comp',
lower='lower', upper='upper',
percentile='scale', log='log')
self.state.data = self.data
self.state.comp = self.data.id['x']
self.x_id = self.data.main_components[0]
self.y_id = self.data.main_components[1]
def test_minmax(self):
assert self.helper.lower == -100
assert self.helper.upper == +100
def test_change_attribute(self):
self.helper.attribute = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
self.helper.attribute = self.x_id
assert self.helper.lower == -100
assert self.helper.upper == +100
def test_change_percentile(self):
# Changing scale mode updates the limits
self.helper.percentile = 99.5
assert_allclose(self.helper.lower, -99.5)
assert_allclose(self.helper.upper, +99.5)
self.helper.percentile = 99
assert_allclose(self.helper.lower, -99)
assert_allclose(self.helper.upper, +99)
self.helper.percentile = 90
assert_allclose(self.helper.lower, -90)
assert_allclose(self.helper.upper, +90)
# When switching to custom, the last limits are retained
self.helper.percentile = "Custom"
assert_allclose(self.helper.lower, -90)
assert_allclose(self.helper.upper, +90)
def test_percentile_cached(self):
# Make sure that if we change scale and change attribute, the scale
# modes are cached on a per-attribute basis.
self.helper.percentile = 99.5
self.state.comp = self.y_id
assert self.helper.percentile == 100
self.helper.percentile = 99
self.state.comp = self.x_id
assert self.helper.percentile == 99.5
self.state.comp = self.y_id
assert self.helper.percentile == 99
def test_flip_button(self):
self.helper.flip_limits()
assert self.helper.lower == +100
assert self.helper.upper == -100
# Make sure that values were re-cached when flipping
self.state.comp = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
self.state.comp = self.x_id
assert self.helper.lower == +100
assert self.helper.upper == -100
def test_manual_edit(self):
# Make sure that values are re-cached when edited manually
self.helper.percentile = "Custom"
self.state.lower = -122
self.state.upper = 234
self.helper.log = True
assert self.helper.lower == -122
assert self.helper.upper == 234
assert self.helper.log
self.state.comp = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
assert not self.helper.log
self.state.comp = self.x_id
assert self.helper.lower == -122
assert self.helper.upper == 234
assert self.helper.log
class TestStateAttributeSingleValueHelper():
def setup_method(self, method):
self.data = Data(x=np.linspace(-100, 30, 9999),
y=np.linspace(2, 3, 9999), label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
val = CallbackProperty()
self.state = SimpleState()
self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp',
function=np.nanmedian, value='val')
self.state.data = self.data
self.state.comp = self.data.id['x']
self.x_id = self.data.main_components[0]
self.y_id = self.data.main_components[1]
def test_value(self):
assert self.helper.value == -35.
def test_change_attribute(self):
self.helper.attribute = self.y_id
assert self.helper.value == 2.5
self.helper.attribute = self.x_id
assert self.helper.value == -35
def test_manual_edit(self):
self.state.val = 42.
assert self.helper.value == 42
self.state.comp = self.y_id
assert self.helper.value == 2.5
self.state.comp = self.x_id
assert self.helper.value == 42
class TestStateAttributeHistogramHelper():
def setup_method(self, method):
self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3],
y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
x_min = CallbackProperty()
x_max = CallbackProperty()
n_bin = CallbackProperty()
self.state = SimpleState()
self.helper = StateAttributeHistogramHelper(self.state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin')
self.state.data = self.data
def test_default_numerical(self):
self.state.comp = self.data.id['x']
assert self.state.x_min == -3.2
assert self.state.x_max == 7.2
assert self.state.n_bin == 15
def test_default_categorical(self):
self.state.comp = self.data.id['y']
assert self.state.x_min == -0.5
assert self.state.x_max == 3.5
assert self.state.n_bin == 4
def test_hitting_limits(self):
# FIXME: here we modify the internal defaults rather than making a new
# state helper, but this could be improved
self.helper._default_n_bin = 4
self.helper._max_n_bin = 3
self.state.comp = self.data.id['x']
assert self.state.x_min == -3.2
assert self.state.x_max == 7.2
assert self.state.n_bin == 4
self.state.comp = self.data.id['y']
assert self.state.x_min == -0.5
assert self.state.x_max == 3.5
assert self.state.n_bin == 3
def test_caching(self):
self.state.comp = self.data.id['x']
self.state.x_min = 2
self.state.x_max = 7
self.state.n_bin = 8
self.state.comp = self.data.id['y']
self.state.x_min = 1.5
self.state.x_max = 3.5
self.state.n_bin = 3
self.state.comp = self.data.id['x']
assert self.state.x_min == 2
assert self.state.x_max == 7
assert self.state.n_bin == 8
self.state.comp = self.data.id['y']
assert self.state.x_min == 1.5
assert self.state.x_max == 3.5
assert self.state.n_bin == 3
def test_histogram_helper_common_n_bin():
data = Data(x=[-3.2, 4.3, 2.2],
y=['a', 'f', 'd'],
z=[1.1, 2.3, 1.2],
label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
x_min = CallbackProperty()
x_max = CallbackProperty()
n_bin = CallbackProperty()
common = CallbackProperty()
state = SimpleState()
helper = StateAttributeHistogramHelper(state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin',
common_n_bin='common')
state.data = data
state.comp = data.id['x']
state.n_bin = 9
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 15
state.n_bin = 12
state.common = True
state.comp = data.id['x']
assert state.n_bin == 12
state.n_bin = 11
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 11
state.common = False
state.n_bin = 13
state.comp = data.id['x']
assert state.n_bin == 11
def test_histogram_helper_common_n_bin_active():
# Make sure that common_n_bin works as expected if True from start
data = Data(x=[-3.2, 4.3, 2.2],
y=['a', 'f', 'd'],
z=[1.1, 2.3, 1.2],
label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
x_min = CallbackProperty()
x_max = CallbackProperty()
n_bin = CallbackProperty()
common = CallbackProperty(True)
state = SimpleState()
helper = StateAttributeHistogramHelper(state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin',
common_n_bin='common')
state.data = data
state.comp = data.id['x']
state.n_bin = 9
state.comp = data.id['z']
assert state.n_bin == 9
state.n_bin = 12
state.common = True
state.comp = data.id['x']
assert state.n_bin == 12
state.n_bin = 11
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 11
state.common = False
state.n_bin = 13
state.comp = data.id['x']
assert state.n_bin == 11
def test_limits_helper_initial_values():
# Regression test for a bug that occurred if the limits cache was empty
# but some attributes were set to values - in this case we don't want to
# override the existing values.
data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
state = SimpleState()
state.lower = 1
state.upper = 2
state.comp = data.id['x']
helper = StateAttributeLimitsHelper(state, attribute='comp',
lower='lower', upper='upper')
assert helper.lower == 1
assert helper.upper == 2
class DatetimeState(State):
a = CallbackProperty()
def test_state_serialization_datetime64():
state1 = DatetimeState()
state1.a = np.datetime64(100, 'D')
state2 = clone(state1)
assert state2.a == np.datetime64(100, 'D')
def test_nan_inf_minmax():
data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
percentile = CallbackProperty()
log = CallbackProperty()
state = SimpleState()
helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa
lower='lower', upper='upper',
percentile='percentile', log='log')
state.data = data
state.comp = data.id['x']
assert state.lower == -2
assert state.upper == +3
state.log = True
assert state.lower == +1
assert state.upper == +3
state.log = False
state.percentile = 99
assert_allclose(state.lower, -1.97)
assert_allclose(state.upper, +2.98)
def test_percentile_no_log():
# Regression test for a bug that caused a crash if the state class had a
# percentile attribute but no log.
data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
scale = CallbackProperty()
state = SimpleState()
state.comp = data.id['x']
state.lower = 2
state.upper = 4
helper = StateAttributeLimitsHelper(state, attribute='comp',
lower='lower', upper='upper',
percentile='scale')
state.scale = 90
| 2.515625 | 3 |
ecommerce_api/core/cart/exceptions.py | victormartinez/ecommerceapi | 0 | 4434 | <reponame>victormartinez/ecommerceapi
from typing import Iterable, Optional
class ProductsNotFound(Exception):
def __init__(self, product_ids: Optional[Iterable[int]] = None):
self.product_ids = product_ids or []
self.message = "One or more products are invalid."
super().__init__(self.message)
| 2.796875 | 3 |
test/unit/test_record.py | jsoref/neo4j-python-driver | 0 | 4435 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from neo4j.v1 import Record
class RecordTestCase(TestCase):
def test_record_equality(self):
record1 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record2 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"])
assert record1 == record2
assert record1 != record3
assert record2 != record3
def test_record_hashing(self):
record1 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record2 = Record(["name", "empire"], ["Nigel", "The British Empire"])
record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"])
assert hash(record1) == hash(record2)
assert hash(record1) != hash(record3)
assert hash(record2) != hash(record3)
def test_record_iter(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert list(a_record.__iter__()) == ["name", "empire"]
def test_record_copy(self):
original = Record(["name", "empire"], ["Nigel", "The British Empire"])
duplicate = original.copy()
assert dict(original) == dict(duplicate)
assert original.keys() == duplicate.keys()
assert original is not duplicate
def test_record_as_dict(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert dict(a_record) == {"name": "Nigel", "empire": "The British Empire"}
def test_record_as_list(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert list(a_record) == ["name", "empire"]
def test_record_len(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert len(a_record) == 2
def test_record_repr(self):
a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
assert repr(a_record) == "<Record name='Nigel' empire='The British Empire'>"
def test_record_data(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.data(), {"name": "Alice", "age": 33, "married": True})
self.assertEqual(r.data("name"), {"name": "Alice"})
self.assertEqual(r.data("age", "name"), {"age": 33, "name": "Alice"})
self.assertEqual(r.data("age", "name", "shoe size"), {"age": 33, "name": "Alice", "shoe size": None})
self.assertEqual(r.data(0, "name"), {"name": "Alice"})
self.assertEqual(r.data(0), {"name": "Alice"})
self.assertEqual(r.data(1, 0), {"age": 33, "name": "Alice"})
with self.assertRaises(IndexError):
_ = r.data(1, 0, 999)
def test_record_keys(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.keys(), ("name", "age", "married"))
def test_record_values(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.values(), ("Alice", 33, True))
self.assertEqual(r.values("name"), ("Alice",))
self.assertEqual(r.values("age", "name"), (33, "Alice"))
self.assertEqual(r.values("age", "name", "shoe size"), (33, "Alice", None))
self.assertEqual(r.values(0, "name"), ("Alice", "Alice"))
self.assertEqual(r.values(0), ("Alice",))
self.assertEqual(r.values(1, 0), (33, "Alice"))
with self.assertRaises(IndexError):
_ = r.values(1, 0, 999)
def test_record_items(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.items(), [("name", "Alice"), ("age", 33), ("married", True)])
self.assertEqual(r.items("name"), [("name", "Alice")])
self.assertEqual(r.items("age", "name"), [("age", 33), ("name", "Alice")])
self.assertEqual(r.items("age", "name", "shoe size"), [("age", 33), ("name", "Alice"), ("shoe size", None)])
self.assertEqual(r.items(0, "name"), [("name", "Alice"), ("name", "Alice")])
self.assertEqual(r.items(0), [("name", "Alice")])
self.assertEqual(r.items(1, 0), [("age", 33), ("name", "Alice")])
with self.assertRaises(IndexError):
_ = r.items(1, 0, 999)
def test_record_index(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.index("name"), 0)
self.assertEqual(r.index("age"), 1)
self.assertEqual(r.index("married"), 2)
with self.assertRaises(KeyError):
_ = r.index("shoe size")
self.assertEqual(r.index(0), 0)
self.assertEqual(r.index(1), 1)
self.assertEqual(r.index(2), 2)
with self.assertRaises(IndexError):
_ = r.index(3)
with self.assertRaises(TypeError):
_ = r.index(None)
def test_record_value(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertEqual(r.value(), "Alice")
self.assertEqual(r.value("name"), "Alice")
self.assertEqual(r.value("age"), 33)
self.assertEqual(r.value("married"), True)
self.assertEqual(r.value("shoe size"), None)
self.assertEqual(r.value("shoe size", 6), 6)
self.assertEqual(r.value(0), "Alice")
self.assertEqual(r.value(1), 33)
self.assertEqual(r.value(2), True)
self.assertEqual(r.value(3), None)
self.assertEqual(r.value(3, 6), 6)
with self.assertRaises(TypeError):
_ = r.value(None)
def test_record_contains(self):
r = Record(["name", "age", "married"], ["Alice", 33, True])
self.assertTrue("name" in r)
self.assertTrue("age" in r)
self.assertTrue("married" in r)
self.assertFalse("shoe size" in r)
self.assertTrue(0 in r)
self.assertTrue(1 in r)
self.assertTrue(2 in r)
self.assertFalse(3 in r)
with self.assertRaises(TypeError):
_ = r.index(None)
| 2.609375 | 3 |
tests/integration_tests/test_dashboards.py | hugocool/explainerdashboard | 1 | 4436 | <reponame>hugocool/explainerdashboard
import dash
from catboost import CatBoostClassifier, CatBoostRegressor
from xgboost import XGBClassifier, XGBRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer
from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names
from explainerdashboard.dashboards import ExplainerDashboard
def get_classification_explainer(xgboost=False, include_y=True):
X_train, y_train, X_test, y_test = titanic_survive()
if xgboost:
model = XGBClassifier().fit(X_train, y_train)
else:
model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train)
if include_y:
explainer = ClassifierExplainer(
model, X_test, y_test,
cats=['Sex', 'Deck', 'Embarked'],
labels=['Not survived', 'Survived'])
else:
explainer = ClassifierExplainer(
model, X_test,
cats=['Sex', 'Deck', 'Embarked'],
labels=['Not survived', 'Survived'])
explainer.calculate_properties()
return explainer
def get_regression_explainer(xgboost=False, include_y=True):
X_train, y_train, X_test, y_test = titanic_fare()
train_names, test_names = titanic_names()
if xgboost:
model = XGBRegressor().fit(X_train, y_train)
else:
model = RandomForestRegressor(n_estimators=50, max_depth=10).fit(X_train, y_train)
if include_y:
reg_explainer = RegressionExplainer(model, X_test, y_test,
cats=['Sex', 'Deck', 'Embarked'],
idxs=test_names,
units="$")
else:
reg_explainer = RegressionExplainer(model, X_test,
cats=['Sex', 'Deck', 'Embarked'],
idxs=test_names,
units="$")
reg_explainer.calculate_properties()
return reg_explainer
def get_multiclass_explainer(xgboost=False, include_y=True):
X_train, y_train, X_test, y_test = titanic_embarked()
train_names, test_names = titanic_names()
if xgboost:
model = XGBClassifier().fit(X_train, y_train)
else:
model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train)
if include_y:
if xgboost:
multi_explainer = ClassifierExplainer(model, X_test, y_test,
model_output='logodds',
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
else:
multi_explainer = ClassifierExplainer(model, X_test, y_test,
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
else:
if xgboost:
multi_explainer = ClassifierExplainer(model, X_test,
model_output='logodds',
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
else:
multi_explainer = ClassifierExplainer(model, X_test,
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
multi_explainer.calculate_properties()
return multi_explainer
def get_catboost_classifier():
X_train, y_train, X_test, y_test = titanic_survive()
train_names, test_names = titanic_names()
model = CatBoostClassifier(iterations=100, verbose=0).fit(X_train, y_train)
explainer = ClassifierExplainer(
model, X_test, y_test,
cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']},
'Deck', 'Embarked'],
labels=['Not survived', 'Survived'],
idxs=test_names)
X_cats, y_cats = explainer.X_merged, explainer.y.astype("int")
model = CatBoostClassifier(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7])
explainer = ClassifierExplainer(model, X_cats, y_cats, idxs=X_test.index)
explainer.calculate_properties(include_interactions=False)
return explainer
def get_catboost_regressor():
X_train, y_train, X_test, y_test = titanic_fare()
model = CatBoostRegressor(iterations=5, verbose=0).fit(X_train, y_train)
explainer = RegressionExplainer(model, X_test, y_test,
cats=["Sex", 'Deck', 'Embarked'])
X_cats, y_cats = explainer.X_merged, explainer.y
model = CatBoostRegressor(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7])
explainer = RegressionExplainer(model, X_cats, y_cats, idxs=X_test.index)
explainer.calculate_properties(include_interactions=False)
return explainer
def test_classification_dashboard(dash_duo):
explainer = get_classification_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_regression_dashboard(dash_duo):
explainer = get_regression_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=20)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_simple_classification_dashboard(dash_duo):
explainer = get_classification_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("#simple-classifier-composite-title", "testing", timeout=20)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_simple_regression_dashboard(dash_duo):
explainer = get_regression_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("#simple-regression-composite-title", "testing", timeout=20)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_multiclass_dashboard(dash_duo):
explainer = get_multiclass_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_xgboost_classification_dashboard(dash_duo):
explainer = get_classification_explainer(xgboost=True)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_xgboost_regression_dashboard(dash_duo):
explainer = get_regression_explainer(xgboost=True)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_xgboost_multiclass_dashboard(dash_duo):
explainer = get_multiclass_explainer(xgboost=True)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_classification_dashboard_no_y(dash_duo):
explainer = get_classification_explainer(include_y=False)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_regression_dashboard_no_y(dash_duo):
explainer = get_regression_explainer(include_y=False)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_multiclass_dashboard_no_y(dash_duo):
explainer = get_multiclass_explainer(include_y=False)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_catboost_classification_dashboard(dash_duo):
explainer = get_catboost_classifier()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_cat_boost_regression_dashboard(dash_duo):
explainer = get_catboost_regressor()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error" | 2.5625 | 3 |
code/scripts/GeneratePNG_Preview_AsIs.py | dgrechka/bengaliai-cv19 | 0 | 4437 | import tensorflow as tf
import sys
import os
from glob import glob
import png
sys.path.append(os.path.join(__file__,'..','..'))
from tfDataIngest import tfDataSetParquet as tfDsParquet
inputDataDir = sys.argv[1]
outputDir = sys.argv[2]
# test app
if __name__ == "__main__":
files = glob(os.path.join(inputDataDir,"train*.parquet"))
print("Found {0} parquet files in input dir {1}".format(len(files),inputDataDir))
print("First is {0}".format(files[0]))
ds = tfDsParquet.create_parquet_dataset([files[0]])
for element in ds.as_numpy_iterator():
#print("Iterating...")
sampleId,pixels = element
sampleId = sampleId.decode("utf-8")
fileName = os.path.join(outputDir,"{0}.png".format(sampleId))
png.from_array(pixels, mode="L").save(fileName)
#print(element)
#print("sample name is {0}".format(sampleId))
#print(sampleIds.shape)
#print(pixels.shape)
# a += 1
# if a > 10:
# break
print("Done")
#print("{0} elements in the dataset".format(len(ds.))) | 2.546875 | 3 |
widgets/datepicker_ctrl/codegen.py | RSabet/wxGlade | 225 | 4438 | """\
Code generator functions for wxDatePickerCtrl objects
@copyright: 2002-2007 <NAME>
@copyright: 2014-2016 <NAME>
@copyright: 2016-2021 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common, compat
import wcodegen
class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter):
tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\n'
# XXX the following needs to depend on the code generator when Phoenix is about to be supported fully:
if compat.IS_PHOENIX:
import_modules = ['import wx.adv\n']
if compat.IS_PHOENIX:
def cn(self, name):
# don't process already formatted items again
if name.startswith('wx.'):
return name
if name.startswith('wx'):
return 'wx.adv.' + name[2:]
elif name.startswith('EVT_'):
return 'wx.adv.' + name
return name
def _prepare_tmpl_content(self, obj):
wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter):
import_modules = ['<wx/datectrl.h>']
tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \
'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \
'%(style)s);\n'
prefix_style = False
set_default_style = True
def _prepare_tmpl_content(self, obj):
wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
def xrc_code_generator(obj):
xrcgen = common.code_writers['XRC']
class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject):
def write_property(self, name, val, output, tabs):
if name == 'label':
# translate & into _ as accelerator marker
val2 = val.replace('&', '_')
if val.count('&&') > 0:
while True:
index = val.find('&&')
if index < 0:
break
val = val2[:index] + '&&' + val2[index+2:]
else:
val = val2
xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs)
return DatePickerCtrlXrcObject(obj)
def initialize():
klass = 'wxDatePickerCtrl'
common.class_names['EditDatePickerCtrl'] = klass
common.register('python', klass, PythonDatePickerCtrlGenerator(klass))
common.register('C++', klass, CppDatePickerCtrlGenerator(klass))
common.register('XRC', klass, xrc_code_generator)
| 2.375 | 2 |
train.py | lck1201/simple-effective-3Dpose-baseline | 20 | 4439 | <reponame>lck1201/simple-effective-3Dpose-baseline<gh_stars>10-100
import pprint
import mxnet as mx
from mxnet import gluon
from mxnet import init
from lib.core.get_optimizer import *
from lib.core.metric import MPJPEMetric
from lib.core.loss import MeanSquareLoss
from lib.core.loader import JointsDataIter
from lib.network import get_net
from lib.net_module import *
from lib.utils import *
from lib.dataset.hm36 import hm36
from config import config, gen_config, update_config_from_args, s_args
config = update_config_from_args(config, s_args)
def main():
# Parse config and mkdir output
logger, final_Model_path = create_logger(config)
config.final_Model_path = final_Model_path
gen_config(os.path.join(final_Model_path, 'hyperParams.yaml'))
logger.info('Training config:{}\n'.format(pprint.pformat(config)))
# define context
if config.useGPU:
ctx = [mx.gpu(int(i)) for i in config.gpu.split(',')]
else:
ctx = mx.cpu()
logger.info("Using context:", ctx)
# dataset, generate trainset/ validation set
train_imdbs = []
valid_imdbs = []
for i in range(len(config.DATASET.train_image_set)):
logger.info("Construct Dataset:", config.DATASET.dbname[i], ", Dataset Path:", config.DATASET.dataset_path[i])
train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i],
config.DATASET.root_path[i],
config.DATASET.dataset_path[i]))
valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i],
config.DATASET.root_path[i],
config.DATASET.dataset_path[i],
config.final_Model_path))
data_names = ['hm36data']
label_names = ['hm36label']
train_data_iter = JointsDataIter(train_imdbs[0], runmode=0,
data_names = data_names, label_names=label_names,
shuffle=config.TRAIN.SHUFFLE, batch_size=len(ctx)*config.TRAIN.batchsize, logger=logger)
valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1,
data_names = data_names, label_names=label_names,
shuffle=False, batch_size=len(ctx)*config.TEST.batchsize, logger=logger)
assert train_data_iter.get_meanstd()['mean3d'].all() == valid_data_iter.get_meanstd()['mean3d'].all()
# network
net = get_net(config)
if config.resume:
ckp_path = os.path.join(config.resumeckp)
net.collect_params().load(ckp_path, ctx=ctx)
else:
net.initialize(init=init.MSRAPrelu(), ctx=ctx)
if config.NETWORK.hybrid:
net.hybridize()
logger.info(net)
# define loss and metric
mean3d = train_data_iter.get_meanstd()['mean3d']
std3d = train_data_iter.get_meanstd()['std3d']
train_metric = MPJPEMetric('train_metric', mean3d, std3d)
eval_metric = MPJPEMetric('valid_metric', mean3d, std3d)
loss = MeanSquareLoss()
# optimizer
optimizer, optimizer_params = get_optimizer(config, ctx)
# train and valid
TrainDBsize = train_data_iter.get_size()
ValidDBsize = valid_data_iter.get_size()
logger.info("Train DB size:", TrainDBsize, "Valid DB size:",ValidDBsize)
if not isinstance(train_data_iter, mx.io.PrefetchingIter):
train_data_iter = mx.io.PrefetchingIter(train_data_iter)
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
for epoch in range(config.TRAIN.begin_epoch, config.TRAIN.end_epoch):
trainNet(net, trainer, train_data_iter, loss, train_metric, epoch, config, logger=logger, ctx=ctx)
validNet(net, valid_data_iter, loss, eval_metric, epoch, config, logger=logger, ctx=ctx)
logger.kill()
if __name__ == '__main__':
main() | 1.773438 | 2 |
FastLinear/generate_memory_bank.py | WangFeng18/dino | 0 | 4440 | <filename>FastLinear/generate_memory_bank.py
import os
from tqdm import tqdm
import torch.backends.cudnn as cudnn
import torch
from datasets import ImageNetInstance, ImageNetInstanceLMDB
from torchvision import transforms
import argparse
from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network
from torch.utils.data import DataLoader
from PIL import ImageFile, Image
import torch.distributed as dist
from lars import *
ImageFile.LOAD_TRUNCATED_IMAGES = True
import warnings
warnings.filterwarnings('ignore')
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
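# Usage sketch (illustrative shapes, not taken from a specific run): with world_size ranks each
# holding a local feature batch of shape [B, feat_dim],
#   feats = concat_all_gather(local_feats.contiguous())   # -> shape [world_size * B, feat_dim]
# All ranks must call this collectively, and because all_gather carries no gradient, the result
# is only suitable for storage, as with the memory banks filled in main() below.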
def main():
parser = argparse.ArgumentParser("The first stage of BoostrapSelfSup")
parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed parallel')
parser.add_argument("--task", type=str, default="moco", help="the pretraining models")
parser.add_argument("--pretrained_path", type=str, default="", help="the pretraining models")
parser.add_argument("--save_path", type=str, default="", help="where to save the memory_bank")
parser.add_argument("--backbone", type=str, default="resnet50")
parser.add_argument("--data_path", type=str, default="~/ILSVRC2012/", help="the data path")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--img_size", type=int, default=224, help="image size")
parser.add_argument("--feat_dim", type=int, default=128, help="feat dimension")
parser.add_argument("--feature_layer", type=str, default='lowdim', help="feature layer")
parser.add_argument('--use-lmdb', action='store_true')
args = parser.parse_args()
pretrained_path = os.path.expanduser(args.pretrained_path)
save_path = os.path.expanduser(args.save_path)
data_path = os.path.expanduser(args.data_path)
batch_size = args.batch_size
feat_dim = args.feat_dim
dist.init_process_group(backend='nccl')
torch.cuda.set_device(args.local_rank)
# network = ResNet(50, frozen_stages=4)
if args.task == 'moco':
network = get_moco_network(pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'swav':
network = get_swav_network(pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'selfboost':
network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'minmaxent':
network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'dino':
network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'simclr':
network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'sup':
network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
else:
raise NotImplementedError
network.cuda(args.local_rank)
network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank])
cudnn.benchmark = True
augmentation = transforms.Compose([
transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC),
transforms.CenterCrop(args.img_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
if args.use_lmdb:
train_dataset = ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb', transform=augmentation)
val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation)
else:
train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation)
val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, rank=args.local_rank)
n_train_points = len(train_dataset)
n_val_points = len(val_dataset)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4)
print("Initializing train memory bank: {} points.".format(n_train_points))
train_memory_bank = torch.zeros(n_train_points, feat_dim).to("cpu").detach()
print("Initializing val memory bank: {} points.".format(n_val_points))
val_memory_bank = torch.zeros(n_val_points, feat_dim).to("cpu").detach()
network.eval()
train_sampler.set_epoch(0)
val_sampler.set_epoch(0)
for data in tqdm(train_dataloader):
idx, img, _ = data
idx = idx.cuda(args.local_rank, non_blocking=True)
img = img.cuda(args.local_rank, non_blocking=True)
if True: #args.backbone.startswith('resnet'):
feature = network(img)
else:
feature = network.module.get_intermediate_layers(img, 4)
feature = [x[:, 0] for x in feature]
feature = torch.cat(feature, dim=-1)
feature = concat_all_gather(feature.contiguous())
idx = concat_all_gather(idx)
with torch.no_grad():
train_memory_bank[idx,:] = feature.detach().cpu()
for data in tqdm(val_dataloader):
idx, img, _ = data
idx = idx.cuda(args.local_rank, non_blocking=True)
img = img.cuda(args.local_rank, non_blocking=True)
if True: #args.backbone.startswith('resnet'):
feature = network(img)
else:
feature = network.module.get_intermediate_layers(img, 4)
feature = [x[:, 0] for x in feature]
feature = torch.cat(feature, dim=-1)
feature = concat_all_gather(feature.contiguous())
idx = concat_all_gather(idx)
with torch.no_grad():
val_memory_bank[idx,:] = feature.detach().cpu()
if args.local_rank == 0:
torch.save(
{'train_memory_bank': train_memory_bank,
'val_memory_bank': val_memory_bank
},
args.save_path
)
if __name__ == '__main__':
main()
| 2.1875 | 2 |
tests/utils/test_mercator.py | anuragtr/fabric8-analytics-rudra | 1 | 4441 | <reponame>anuragtr/fabric8-analytics-rudra
import pytest
from rudra.utils.mercator import SimpleMercator
class TestSimpleMercator:
pom_xml_content = """
<project>
<dependencies>
<dependency>
<groupId>grp1.id</groupId>
<artifactId>art1.id</artifactId>
</dependency>
<dependency>
<groupId>grp2.id</groupId>
<artifactId>art2.id</artifactId>
</dependency>
<dependency>
<groupId>grp3.id</groupId>
<artifactId>art3.id</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>
"""
def test_get_dependencies(self):
client = SimpleMercator(self.pom_xml_content)
deps = client.get_dependencies()
assert len(deps) == 3
artifact_ids = [d.artifact_id for d in deps]
assert not {'art1.id', 'art2.id', 'art3.id'}.difference(set(artifact_ids))
group_ids = [d.group_id for d in deps]
assert not {'grp1.id', 'grp2.id', 'grp3.id'}.difference(set(group_ids))
scopes = [d.scope for d in deps]
assert not {'compile', 'test'}.difference(set(scopes))
def test_get_dependencies_with_no_dependencies(self):
client = SimpleMercator('<project></project>'.encode())
deps = client.get_dependencies()
assert len(deps) == 0
def test_get_dependencies_with_no_content(self):
with pytest.raises(ValueError, match='Empty Content .*'):
SimpleMercator('')
def test_find_data_corrupt_pom(self):
content = """
</project>
</project>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>grp1.id</groupId>
<artifactId>art1.id</artifactId>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>grp1.id</groupId>
<artifactId>art1.id</artifactId>
</dependency>
</dependencies>
</project>
"""
client = SimpleMercator(content)
deps = client.get_dependencies()
assert len(deps) == 1
artifact_ids = [d.artifact_id for d in deps]
assert 'art1.id' in artifact_ids
| 2.203125 | 2 |
tests/checks/run_performance_tests.py | stjordanis/mljar-supervised | 1,882 | 4442 | import os
import sys
import unittest
from tests.tests_bin_class.test_performance import *
if __name__ == "__main__":
unittest.main()
| 1.203125 | 1 |
task/CheckAllocations.py | wookiee2187/vc3-login-pod | 1 | 4443 | <gh_stars>1-10
#!/usr/bin/env python
from vc3master.task import VC3Task
class CheckAllocations(VC3Task):
'''
Plugin to do consistency/sanity checks on Allocations.
'''
def runtask(self):
'''
'''
self.log.info("Running task %s" % self.section) | 1.992188 | 2 |
django_airbrake/utils/client.py | Captricity/airbrake-django | 0 | 4444 | <gh_stars>0
import sys
import traceback
from django.conf import settings
from django.urls import resolve
from lxml import etree
from six.moves.urllib.request import urlopen, Request
class Client(object):
API_URL = '%s://airbrake.io/notifier_api/v2/notices'
ERRORS = {
403: "Cannot use SSL",
422: "Invalid XML sent to Airbrake",
500: "Airbrake has braked too hard",
}
DEFAULTS = {
'TIMEOUT': 5,
'USE_SSL': False,
}
@property
def url(self):
scheme = 'http'
if self.settings['USE_SSL']:
scheme = 'https'
return Client.API_URL % scheme
@property
def settings(self):
if getattr(self, '_settings', None):
return self._settings
self._settings = Client.DEFAULTS
self._settings.update(getattr(settings, 'AIRBRAKE', {}))
return self._settings
def notify(self, exception=None, request=None):
headers = {
'Content-Type': 'text/xml'
}
payload = self._generate_xml(exception=exception, request=request)
req = Request(self.url, payload, headers)
resp = urlopen(req, timeout=self.settings['TIMEOUT'])
status = resp.getcode()
if status == 200:
return True
elif status in Client.ERRORS:
raise Exception(Client.ERRORS[status])
def _generate_xml(self, exception=None, request=None):
_, _, trace = sys.exc_info()
notice_em = etree.Element('notice', version='2.0')
tb = traceback.extract_tb(trace)
api_key = etree.SubElement(notice_em, 'api-key').text = self.settings['API_KEY']
notifier_em = etree.SubElement(notice_em, 'notifier')
etree.SubElement(notifier_em, 'name').text = 'django-airbrake'
etree.SubElement(notifier_em, 'version').text = '0.0.4'
url_el = etree.SubElement(notifier_em, 'url')
url_el.text = 'http://example.com'
if request:
request_em = etree.SubElement(notice_em, 'request')
if request.is_secure():
scheme = 'https'
else:
scheme = 'http'
url = '%s://%s%s' % (scheme, request.get_host(),
request.get_full_path())
etree.SubElement(request_em, 'url').text = str(url)
url_el.text = url
cb, _, _ = resolve(request.path)
etree.SubElement(request_em, 'component').text = str(cb.__module__)
etree.SubElement(request_em, 'action').text = str(cb.__name__)
if 'context' in self.settings:
cgi_em = etree.SubElement(request_em, 'cgi-data')
for key, val in list(self.settings['context'].items()):
var = etree.SubElement(cgi_em, 'var')
var.set('key', str(key))
var.text = str(val)
session = list(request.session.items())
if len(session):
session_em = etree.SubElement(request_em, 'session')
for key, val in session:
var = etree.SubElement(session_em, 'var')
var.set('key', str(key))
var.text = str(val)
if exception:
error_em = etree.SubElement(notice_em, 'error')
etree.SubElement(error_em, 'class').text = str(exception.__class__.__name__)
etree.SubElement(error_em, 'message').text = str(exception)
backtrace_em = etree.SubElement(error_em, 'backtrace')
for line in tb:
etree.SubElement(backtrace_em, 'line',
file=str(line[0]),
number=str(line[1]),
method=str(line[2]))
env_em = etree.SubElement(notice_em, 'server-environment')
etree.SubElement(env_em, 'environment-name').text = self.settings.get('ENVIRONMENT', 'development')
return '<?xml version="1.0" encoding="UTF-8"?>%s' % etree.tostring(notice_em)
| 1.992188 | 2 |
src/spaceone/inventory/connector/snapshot.py | jean1042/plugin-azure-cloud-services | 1 | 4445 | <reponame>jean1042/plugin-azure-cloud-services
import logging
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error import *
from spaceone.inventory.error.custom import *
__all__ = ['SnapshotConnector']
_LOGGER = logging.getLogger(__name__)
class SnapshotConnector(AzureConnector):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_connect(kwargs.get('secret_data'))
def list_snapshots(self):
try:
return self.compute_client.snapshots.list()
except ConnectionError:
            _LOGGER.error(ERROR_CONNECTOR(field='Snapshot'))
| 1.984375 | 2 |
docs/tutorial/context/app.py | theasylum/wired | 12 | 4446 | <gh_stars>10-100
"""
A customer walks into a store. Do the steps to interact with them:
- Get *a* (not *the*) greeter
- Interact with them
Simple wired application:
- Settings that say what punctuation to use
- Registry
- Two factories that say hello, one for the FrenchCustomer context
- A default Customer and FrenchCustomer
"""
from dataclasses import dataclass
from wired import ServiceRegistry
@dataclass
class Customer:
name: str
@dataclass
class FrenchCustomer(Customer):
pass
@dataclass
class Settings:
punctuation: str
@dataclass
class Greeter:
punctuation: str
greeting: str = 'Hello'
def __call__(self, customer: Customer) -> str:
return f'{self.greeting} {customer.name} {self.punctuation}'
@dataclass
class FrenchGreeter(Greeter):
greeting: str = 'Bonjour'
def __call__(self, customer: Customer) -> str:
return f'{self.greeting} {customer.name} {self.punctuation}'
def setup(settings: Settings) -> ServiceRegistry:
# Make the registry
registry = ServiceRegistry()
# Make the greeter factories, using punctuation from settings
punctuation = settings.punctuation
# First the default greeter, no context
def default_greeter_factory(container) -> Greeter:
# Use the dataclass default for greeting
return Greeter(punctuation=punctuation)
# Register it as a factory using its class for the "key"
registry.register_factory(default_greeter_factory, Greeter)
# Now the French greeter, using context of FrenchCustomer
def french_greeter_factory(container) -> Greeter:
# Use the dataclass default for greeting
return FrenchGreeter(punctuation=punctuation)
# Register it as a factory using its class for the "key", but
# this time register with a "context"
registry.register_factory(
french_greeter_factory, Greeter, context=FrenchCustomer
)
return registry
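# Dispatch sketch (behavior exercised by the asserts in main() below): inside greet_customer,
#   container.get(Greeter, context=Customer(...))       -> Greeter       -> 'Hello <name> !!'
#   container.get(Greeter, context=FrenchCustomer(...)) -> FrenchGreeter -> 'Bonjour <name> !!'
# because french_greeter_factory was registered for Greeter with context=FrenchCustomer, while
# default_greeter_factory is the context-free fallback.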
def greet_customer(registry: ServiceRegistry, customer: Customer) -> str:
# A customer comes in, handle the steps in the greeting
# as a container.
container = registry.create_container()
# Get a Greeter using the customer as context. Use the Customer when
# generating the greeting.
greeter: Greeter = container.get(Greeter, context=customer)
greeting = greeter(customer)
return greeting
def main():
settings = Settings(punctuation='!!')
registry = setup(settings)
# *** Default Customer
# Make a Customer, pass into the "greet_customer" interaction,
# then test the result.
customer = Customer(name='Mary')
assert 'Hello Mary !!' == greet_customer(registry, customer)
# *** French Customer
# Make a FrenchCustomer, pass into the "greet_customer" interaction,
# then test the result.
french_customer = FrenchCustomer(name='Henri')
assert 'Bonjour Henri !!' == greet_customer(registry, french_customer)
| 3.359375 | 3 |
feast/DetectionModules/ldar_program.py | GeoSensorWebLab/FEAST_PtE | 10 | 4447 | <reponame>GeoSensorWebLab/FEAST_PtE
"""
This module defines the LDARProgram class.
"""
import numpy as np
import copy
from .repair import Repair
from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous
class LDARProgram:
"""
An LDAR program contains one or more detection methods and one or more repair methods. Each LDAR program records
the find and repair costs associated with all detection and repair methods in the program. The LDAR program
    runs the action methods of each detection and repair method contained in the program. The detection and
repair methods determine their own behavior at each time step.
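    A construction sketch (hypothetical names, following this contract): tech_dict = {'ogi_survey': ogi_method},
    where each DetectionMethod carries its own dispatch_object; any Repair dispatch objects found there are
    collected into self.repair by __init__.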
"""
def __init__(self, gas_field, tech_dict):
"""
:param gas_field: a GasField object
:param tech_dict: a dict containing all of the detection methods to be employed by the LDAR program. The dict
must have the form {"name": DetectionMethod}. All of the relationships between detection methods and between
detection methods and repair methods must be defined by the dispatch_objects specified for each method.
"""
self.emissions = copy.deepcopy(gas_field.emissions)
self.emissions_timeseries = []
self.vents_timeseries = []
#self.emissions_results = ResultContinuous(units='g/s')
#self.vents_results = ResultContinuous(units='g/s')
self.tech_dict = tech_dict
self.repair = {}
self.repair_cost = ResultDiscrete(units='USD')
for tech_name, tech in tech_dict.items():
if type(tech.dispatch_object) is Repair:
self.repair[tech_name + ' ' + tech.dispatch_object.name] = tech.dispatch_object
def action(self, time, gas_field):
"""
Runs the detect method for every tech in tech_dict and runs the repair method
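        (Scheduling sketch with illustrative numbers: with tech.survey_interval = 50 and time.delta_t = 1, in days,
        np.mod(time.current_time, survey_interval) < delta_t holds once every 50 days, so on those steps the tech
        first receives every site index via tech.action(...) before its detect() is called.)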
:param time: the simulation time object
:param gas_field: the simulation gas_field object
:return:
"""
for i, tech in enumerate(self.tech_dict.values()):
if hasattr(tech, 'survey_interval') and tech.survey_interval \
and np.mod(time.current_time, tech.survey_interval) < time.delta_t:
tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int)))
tech.detect(time, gas_field, self.emissions.get_current_emissions(time))
for rep in self.repair.values():
rep.repair(time, self.emissions)
def calc_rep_costs(self, time):
"""
Calculates the total repair costs up to time.current_time, assuming that all reparable emissions that have a
max end_time less than time.current_time have been repaired.
:param time: a FEAST time object
:return: None
"""
for em in self.emissions.emissions.index.unique():
empdf_temp = self.emissions.emissions.loc[[em]]
max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0]
if max_row.reparable & (max_row.end_time < time.current_time):
self.repair_cost.append_entry([max_row.end_time, max_row.repair_cost])
| 2.578125 | 3 |
src/CycleGAN.py | sjmoran/SIDGAN | 25 | 4448 | #Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 0-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the BSD 0-Clause License for more details.
from keras.optimizers import Adam
from models.ICCV_architectures import *
from models.unet import *
from keras.engine.topology import Network
import sys
import tensorflow as tf
from utilities.data_loader import *
class CycleGAN():
def __init__(self,
opt,
image_shape=(256 * 1, 256 * 1, 3),
load_training_data=True,
normalization=InstanceNormalization,
):
self.task = opt.task
self.im_w = opt.im_w
self.im_h = opt.im_h
self.data_root = opt.data_root
self.img_shape = image_shape
self.channels = self.img_shape[-1]
# Fetch data during training instead of pre caching all images
self.use_data_generator = True
self.generator_architecture = opt.generator_architecture
self.use_norm = opt.use_norm
self.add_extra_conv = opt.add_extra_conv
self.image_shapeA = (opt.im_w * 1, opt.im_h * 1, 3)
self.image_shapeA_in = (None, None, 3)
if self.task == 'Long2Short_raw':
self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 1)
self.image_shapeB_in = (None, None, 3)
else:
self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 3)
self.image_shapeB_in = (None, None, 3)
# Identity loss - sometimes send images from B to G_A2B (and the opposite) to teach identity mappings
self.use_identity_learning = opt.use_identity_learning
        self.identity_mapping_modulus = opt.identity_mapping_modulus  # Identity mapping will be done each time the iteration number is divisible by this number
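        # (When identity learning is enabled, the training loop is expected to periodically fit
        # G_A2B on B->B and G_B2A on A->A with an MAE objective -- the generators are compiled
        # separately with 'MAE' further below for exactly this purpose.)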
# PatchGAN - if false the discriminator learning rate should be decreased
self.use_patchgan = opt.use_patchgan
self.normalization = normalization
# Loss hyperparameters
self.lambda_1 = opt.lambda_1 # Cyclic loss weight A_2_B
self.lambda_2 = opt.lambda_2 # Cyclic loss weight B_2_A
self.lambda_D = opt.lambda_D # Weight for loss from discriminator guess on synthetic images
# Learning rates
self.learning_rate_D = opt.lr_D
self.learning_rate_G = opt.lr_G
self.beta_1 = opt.beta_1
self.beta_2 = opt.beta_2
self.batch_size = 1
self.clipvalue = opt.clipvalue
self.epsilon_norm = opt.epsilon_norm
# self.crop_res = opt.crop_res
# Resize convolution - instead of transpose convolution in deconvolution layers (uk) - can reduce checkerboard artifacts but the blurring might affect the cycle-consistency
self.use_resize_convolution = opt.use_resize_convolution
# Supervised learning part
self.use_supervised_learning = opt.use_supervised_learning
self.supervised_weight = opt.supervised_weight
self.supervised_loss = opt.supervised_loss
# optimizer
if opt.clipvalue is not None:
self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue)
self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue)
else:
self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2)
self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2)
# # ======= Discriminator model ==========
if self.generator_architecture == 'ICCV':
D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan,
disc_use_4_layers=True)
D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan,
disc_use_4_layers=True)
            loss_weights_D = [0.5]  # 0.5 since we train on real and synthetic images
elif self.generator_architecture == 'unet_mini':
D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm,
use_patchgan=self.use_patchgan)
D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm,
use_patchgan=self.use_patchgan)
loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images
# Discriminator builds
image_A = Input(self.image_shapeA)
image_B = Input(self.image_shapeB)
guess_A = D_A(image_A)
guess_B = D_B(image_B)
self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model')
self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model')
if self.use_patchgan:
self.D_A.compile(optimizer=self.opt_D,
loss=self.lse,
loss_weights=loss_weights_D)
self.D_B.compile(optimizer=self.opt_D,
loss=self.lse,
loss_weights=loss_weights_D)
else:
self.D_A.compile(optimizer=self.opt_D,
loss='binary_crossentropy',
loss_weights=loss_weights_D)
self.D_B.compile(optimizer=self.opt_D,
loss='binary_crossentropy',
loss_weights=loss_weights_D)
        # Use Network instances to avoid a spurious Keras error about weight discrepancies
self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model')
self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model')
# ============= Generator models =======================
        # Do not update discriminator weights during generator training
self.D_A_static.trainable = False
self.D_B_static.trainable = False
# Generators
if self.generator_architecture == 'ICCV':
self.G_A2B = modelGenerator(conv_kernel_c7Ak=7,
use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA,
output=self.image_shapeB, name='G_A2B_model')
self.G_B2A = modelGenerator(conv_kernel_c7Ak=7,
use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB,
output=self.image_shapeA, name='G_B2A_model')
elif self.generator_architecture == 'unet_mini':
self.G_A2B = unet_generator_mini(input=self.image_shapeA,
output=self.image_shapeB,
normalization=normalization,
epsilon=self.epsilon_norm,
use_norm=self.use_norm,
add_extra_conv=self.add_extra_conv,
use_resize_convolution=self.use_resize_convolution,
name='G_A2B_model')
self.G_B2A = unet_generator_mini(input=self.image_shapeB,
output=self.image_shapeA,
normalization=normalization,
epsilon=self.epsilon_norm,
use_norm=self.use_norm,
add_extra_conv=self.add_extra_conv,
use_resize_convolution=self.use_resize_convolution,
name='G_B2A_model')
if self.use_identity_learning:
self.G_A2B.compile(optimizer=self.opt_G, loss='MAE')
self.G_B2A.compile(optimizer=self.opt_G, loss='MAE')
# Generator builds
real_A = Input(shape=self.image_shapeA, name='real_A')
real_B = Input(shape=self.image_shapeB, name='real_B')
synthetic_B = self.G_A2B(real_A)
synthetic_A = self.G_B2A(real_B)
dA_guess_synthetic = self.D_A_static(synthetic_A)
dB_guess_synthetic = self.D_B_static(synthetic_B)
reconstructed_A = self.G_B2A(synthetic_B)
reconstructed_B = self.G_A2B(synthetic_A)
model_outputs = [reconstructed_A, reconstructed_B]
compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse]
compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D]
model_outputs.append(dA_guess_synthetic)
model_outputs.append(dB_guess_synthetic)
if self.use_supervised_learning:
model_outputs.append(synthetic_A)
model_outputs.append(synthetic_B)
if self.supervised_loss == 'MAE':
compile_losses.append('MAE')
compile_losses.append('MAE')
compile_weights.append(self.supervised_weight)
compile_weights.append(self.supervised_weight)
self.G_model = Model(inputs=[real_A, real_B],
outputs=model_outputs,
name='G_model')
self.G_model.compile(optimizer=self.opt_G,
loss=compile_losses,
loss_weights=compile_weights)
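        # Output-to-loss mapping of the combined generator model assembled above:
        #   reconstructed_A / reconstructed_B       -> cycle_loss, weighted by lambda_1 / lambda_2
        #   dA_guess_synthetic / dB_guess_synthetic -> lse (least-squares GAN loss), weighted by lambda_D
        #   synthetic_A / synthetic_B (only when use_supervised_learning) -> 'MAE', weighted by supervised_weight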
# ======= Data ==========
# Use 'None' to fetch all available images
nr_A_test_imgs = 1000
nr_B_test_imgs = 1000
if self.use_data_generator:
print('--- Using dataloader during training ---')
else:
print('--- Caching data ---')
sys.stdout.flush()
if load_training_data:
if self.use_data_generator:
self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size,
crop_size=self.im_w, generator=True)
# Only store test images
if opt.task == 'Vimeo2Long_SID':
self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs,
nr_B_test_imgs)
else:
self.A_test = []
self.B_test = []
self.A_train = []
self.B_train = []
if not self.use_data_generator:
print('Data has been loaded')
def load_model_and_weights(self, model, weights_path, iteration, by_name):
name = model.name + '_weights_epoch_' + str(iteration)
final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name))
model.load_weights(final_path, by_name=by_name)
def print_info(self):
        print('Initializing CycleGAN with parameters ...')
print('task: ', self.task)
print('generator architecture: ', self.generator_architecture)
print('image width: ', self.im_w)
print('image height: ', self.im_h)
print('learning date G: ', self.learning_rate_G)
print('learning date D: ', self.learning_rate_D)
print('use patchGAN: ', self.use_patchgan)
print('use_identity_learning: ', self.use_identity_learning)
print('normalization: ', self.normalization)
print('identity_mapping_modulus: ', self.identity_mapping_modulus)
print('lambda_1: ', self.lambda_1)
print('lambda_2: ', self.lambda_2)
print('lambda_D: ', self.lambda_D)
print('beta_1: ', self.beta_1)
print('beta_2: ', self.beta_2)
print('use_supervised_learning: ', self.use_supervised_learning)
print('supervised_weight: ', self.supervised_weight)
print('supervised_loss: ', self.supervised_loss)
def lse(self, y_true, y_pred):
loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true))
return loss
def cycle_loss(self, y_true, y_pred):
loss = tf.reduce_mean(tf.abs(y_pred - y_true))
return loss
| 2.34375 | 2 |
application/fastapi/main.py | edson-dev/neoway | 0 | 4449 | import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from routes import doc, api
from fastapi.templating import Jinja2Templates
from starlette.requests import Request
# configure static files and Jinja2 templates
app = FastAPI(
    title="Technical Case",
    description="Endpoint for uploading spreadsheets to a relational Postgres database.",
    version="0.0.1",
static_directory="static"
)
app.mount("/static", StaticFiles(directory="static"), name="static")
# import factory builders and register the doc and api routes on the app
doc.init_app(app)
api.init_app(app, "/api")
#
templates = Jinja2Templates(directory="templates")
#views
@app.get("/", tags=["/view"])
async def index(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=8080)
| 2.46875 | 2 |
civis/io/_tables.py | jsfalk/civis-python | 0 | 4450 | <filename>civis/io/_tables.py
import json
import concurrent.futures
import csv
from os import path
import io
import logging
import os
import shutil
from tempfile import TemporaryDirectory
import warnings
import zlib
import gzip
import zipfile
from civis import APIClient
from civis._utils import maybe_get_random_name
from civis.base import EmptyResultError, CivisImportError
from civis.futures import CivisFuture
from civis.io import civis_to_file, file_to_civis, query_civis
from civis.utils import run_job
from civis._deprecation import deprecate_param
import requests
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
try:
import pandas as pd
NO_PANDAS = False
except ImportError:
NO_PANDAS = True
CHUNK_SIZE = 32 * 1024
log = logging.getLogger(__name__)
__all__ = ['read_civis', 'read_civis_sql', 'civis_to_csv',
'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis',
'civis_file_to_table', 'split_schema_tablename',
'export_to_civis_file']
DELIMITERS = {
',': 'comma',
'\t': 'tab',
'|': 'pipe',
}
@deprecate_param('v2.0.0', 'api_key')
def read_civis(table, database, columns=None, use_pandas=False,
job_name=None, api_key=None, client=None, credential_id=None,
polling_interval=None, archive=False, hidden=True, **kwargs):
"""Read data from a Civis table.
Parameters
----------
table : str
Name of table, including schema, in the database. E.g.
``'my_schema.my_table'``. Schemas or tablenames with periods must
be double quoted, e.g. ``'my_schema."my.table"'``.
database : str or int
Read data from this database. Can be the database name or ID.
columns : list, optional
A list of column names. Column SQL transformations are possible.
If omitted, all columns are exported.
use_pandas : bool, optional
If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,
return a list of results from :func:`python:csv.reader`.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments are passed into
:func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or
passed into :func:`python:csv.reader` if `use_pandas` is
``False``.
Returns
-------
data : :class:`pandas:pandas.DataFrame` or list
A list of rows (with header as first row) if `use_pandas` is
``False``, otherwise a `pandas` `DataFrame`. Note that if
`use_pandas` is ``False``, no parsing of types is performed and
each row will be a list of strings.
Raises
------
ImportError
If `use_pandas` is ``True`` and `pandas` is not installed.
Examples
--------
>>> table = "schema.table"
>>> database = "my_data"
>>> columns = ["column_a", "ROW_NUMBER() OVER(ORDER BY date) AS order"]
>>> data = read_civis(table, database, columns=columns)
>>> columns = data.pop(0)
>>> col_a_index = columns.index("column_a")
>>> col_a = [row[col_a_index] for row in data]
>>> df = read_civis("schema.table", "my_data", use_pandas=True)
>>> col_a = df["column_a"]
See Also
--------
civis.io.read_civis_sql : Read directly into memory using SQL.
civis.io.civis_to_csv : Write directly to csv.
civis.io.export_to_civis_file : Store a SQL query's results in a Civis file
"""
if use_pandas and NO_PANDAS:
raise ImportError("use_pandas is True but pandas is not installed.")
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
if client is None:
# Instantiate client here in case users provide a (deprecated) api_key
client = APIClient(api_key=api_key)
sql = _get_sql_select(table, columns)
data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas,
job_name=job_name, client=client,
credential_id=credential_id,
polling_interval=polling_interval,
archive=archive, hidden=hidden, **kwargs)
return data
def export_to_civis_file(sql, database, job_name=None, client=None,
credential_id=None, polling_interval=None,
hidden=True, csv_settings=None):
"""Store results of a query to a Civis file
Parameters
----------
sql : str
The SQL select string to be executed.
database : str or int
Execute the query against this database. Can be the database name
or ID.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
csv_settings : dict, optional
A dictionary of csv_settings to pass to
:func:`civis.APIClient.scripts.post_sql`.
Returns
-------
fut : :class:`~civis.futures.CivisFuture`
A future which returns the response from
:func:`civis.APIClient.scripts.get_sql_runs` after the sql query
has completed and the result has been stored as a Civis file.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> fut = export_to_civis_file(sql, "my_database")
>>> file_id = fut.result()['output'][0]["file_id"]
See Also
--------
civis.io.read_civis : Read directly into memory without SQL.
civis.io.read_civis_sql : Read results of a SQL query into memory.
civis.io.civis_to_csv : Write directly to a CSV file.
civis.io.civis_file_to_table : Upload a Civis file to a Civis table
"""
client = client or APIClient()
script_id, run_id = _sql_script(client=client,
sql=sql,
database=database,
job_name=job_name,
credential_id=credential_id,
csv_settings=csv_settings,
hidden=hidden)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
return fut
@deprecate_param('v2.0.0', 'api_key')
def read_civis_sql(sql, database, use_pandas=False, job_name=None,
api_key=None, client=None, credential_id=None,
polling_interval=None, archive=False,
hidden=True, **kwargs):
"""Read data from Civis using a custom SQL string.
The custom SQL string will be executed twice; once to attempt to
retrieve headers and once to retrieve the data. This is done to
use a more performant method for retrieving the data. The first
execution of the custom SQL is controlled such that changes in
state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.).
Parameters
----------
sql : str
The SQL select string to be executed.
database : str or int
Execute the query against this database. Can be the database name
or ID.
use_pandas : bool, optional
If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,
return a list of results from :func:`python:csv.reader`.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments are passed into
:func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or
passed into :func:`python:csv.reader` if `use_pandas` is
``False``.
Returns
-------
data : :class:`pandas:pandas.DataFrame` or list
A list of rows (with header as first row) if `use_pandas` is
``False``, otherwise a `pandas` `DataFrame`. Note that if
`use_pandas` is ``False``, no parsing of types is performed and
each row will be a list of strings.
Raises
------
ImportError
If `use_pandas` is ``True`` and `pandas` is not installed.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> df = read_civis_sql(sql, "my_database", use_pandas=True)
>>> col_a = df["column_a"]
>>> data = read_civis_sql(sql, "my_database")
>>> columns = data.pop(0)
>>> col_a_index = columns.index("column_a")
>>> col_a = [row[col_a_index] for row in data]
Notes
-----
This reads the data into memory.
See Also
--------
civis.io.read_civis : Read directly into memory without SQL.
civis.io.civis_to_csv : Write directly to a CSV file.
"""
if client is None:
client = APIClient(api_key=api_key)
if use_pandas and NO_PANDAS:
raise ImportError("use_pandas is True but pandas is not installed.")
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
# Try to get headers separately. In most scenarios this will greatly
# reduce the work that Platform does to provide a single output file
# with headers prepended to it due to how distributed databases export
# data at scale.
headers = _get_headers(client, sql, db_id, credential_id, polling_interval)
# include_header defaults to True in the API.
include_header = True if headers is None else False
csv_settings = dict(include_header=include_header,
compression='gzip')
script_id, run_id = _sql_script(client, sql, db_id,
job_name, credential_id,
csv_settings=csv_settings,
hidden=hidden)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
if archive:
def f(x):
return client.scripts.put_sql_archive(script_id, True)
fut.add_done_callback(f)
fut.result()
outputs = client.scripts.get_sql_runs(script_id, run_id)["output"]
if not outputs:
raise EmptyResultError("Query {} returned no output."
.format(script_id))
url = outputs[0]["path"]
file_id = outputs[0]["file_id"]
log.debug('Exported results to Civis file %s (%s)',
outputs[0]["output_name"], file_id)
if use_pandas:
# allows users to enter their own names parameter
_kwargs = {'names': headers}
_kwargs.update(kwargs)
_kwargs['compression'] = 'gzip'
data = pd.read_csv(url, **_kwargs)
else:
response = requests.get(url, stream=True)
response.raise_for_status()
with StringIO() as buf:
if headers:
buf.write(','.join(headers) + '\n')
_decompress_stream(response, buf, write_bytes=False)
buf.seek(0)
data = list(csv.reader(buf, **kwargs))
return data
@deprecate_param('v2.0.0', 'api_key')
def civis_to_csv(filename, sql, database, job_name=None, api_key=None,
client=None, credential_id=None, include_header=True,
compression='none', delimiter=',', unquoted=False,
archive=False, hidden=True, polling_interval=None):
"""Export data from Civis to a local CSV file.
The custom SQL string will be executed twice; once to attempt to
retrieve headers and once to retrieve the data. This is done to
use a more performant method for retrieving the data. The first
execution of the custom SQL is controlled such that changes in
state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.).
Parameters
----------
filename : str
Download exported data into this file.
sql : str
The SQL select string to be executed.
database : str or int
Export data from this database. Can be the database name or ID.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
include_header: bool, optional
If ``True``, the first line of the CSV will be headers.
Default: ``True``.
compression: str, optional
Type of compression to use, if any. One of ``'none'``, ``'zip'``, or
``'gzip'``. Default ``'none'``. ``'gzip'`` currently returns a file
with no compression unless include_header is set to False. In a
future release, a ``'gzip'`` compressed file will be returned for
all cases.
delimiter: str, optional
Which delimiter to use, if any. One of ``','``, ``'\t'``, or
``'|'``. Default: ``','``.
unquoted: bool, optional
Whether or not to quote fields. Default: ``False``.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
results : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> fut = civis_to_csv("file.csv", sql, "my_database")
>>> fut.result() # Wait for job to complete
See Also
--------
civis.io.read_civis : Read table contents into memory.
civis.io.read_civis_sql : Read results of a SQL query into memory.
civis.io.export_to_civis_file : Store a SQL query's results in a Civis file
"""
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
if client is None:
client = APIClient(api_key=api_key)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
# don't fix bug that would cause breaking change for now
# when gzip compression is requested, a gzip file is not actually returned
# instead the gzip file is decompressed during download
if compression == 'gzip' and include_header:
compression = 'none'
# don't support parallel unload; the output format
# is different which would introduce a breaking change
headers = b''
delimiter = DELIMITERS.get(delimiter)
if not delimiter:
raise ValueError("delimiter must be one of {}"
.format(DELIMITERS.keys()))
# always set compression to gzip to reduce I/O
csv_settings = dict(include_header=include_header,
compression='gzip',
column_delimiter=delimiter,
unquoted=unquoted,
filename_prefix=None,
force_multifile=False)
script_id, run_id = _sql_script(client, sql, db_id, job_name,
credential_id, hidden=hidden,
csv_settings=csv_settings)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
download = _download_callback(script_id, run_id, filename,
headers, compression)
fut.add_done_callback(download)
if archive:
def f(x):
return client.scripts.put_sql_archive(script_id, True)
fut.add_done_callback(f)
return fut
@deprecate_param('v2.0.0', 'api_key')
def civis_to_multifile_csv(sql, database, job_name=None, api_key=None,
client=None, credential_id=None,
include_header=True,
compression='none', delimiter='|',
max_file_size=None,
unquoted=False, prefix=None,
polling_interval=None, hidden=True):
"""Unload the result of SQL query and return presigned urls.
This function is intended for unloading large queries/tables from redshift
as it uses a 'PARALLEL ON' S3 unload. It returns a similar manifest file
to conventional S3 UNLOAD statements except the CSV parts are accessible
via both files endpoint IDs and presigned S3 urls.
Parameters
----------
sql : str
The SQL select string to be executed.
database : str or int
Execute the query against this database. Can be the database name
or ID.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
include_header: bool, optional
If ``True`` include a key in the returned dictionary containing a list
of column names. Default: ``True``.
compression: str, optional
Type of compression to use, if any. One of ``'none'``, ``'zip'``, or
``'gzip'``. Default ``'none'``.
delimiter: str, optional
Which delimiter to use, if any. One of ``','``, ``'\t'``, or
``'|'``. Default: ``'|'``.
max_file_size: int, optional
Maximum number of Megabytes each created file will be.
unquoted: bool, optional
Whether or not to quote fields. Default: ``False``.
prefix: str, optional
A user specified filename prefix for the output file to have. Default:
``None``.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
unload_manifest: dict
A dictionary resembling an AWS manifest file. Has the following keys:
'query': str
The query.
'header': list of str
The columns from the query.
'entries': list of dict
Each dict has the following keys:
'id': int
File ID
'name': str
Filename
'size': int
File size in bytes
'url': str
Unsigned S3 URL ('s3://...')
'url_signed': str
Signed S3 URL ('https://...')
'unquoted': bool
Whether the cells are quoted.
'compression': str
Type of compression used.
'delimiter': str
Delimiter that separates the cells.
Examples
--------
>>> sql = "SELECT * FROM schema.my_big_table"
>>> database = "my_database"
>>> delimiter = "|"
>>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter)
>>> ids = [entry['id'] for entry in manifest['entries']]
>>> buf = BytesIO()
>>> civis_to_file(ids[0], buf)
>>> buf.seek(0)
>>> df = pd.read_csv(buf, delimiter=delimiter)
See Also
--------
civis.APIClient.scripts.post_sql
"""
if client is None:
client = APIClient(api_key=api_key)
delimiter = DELIMITERS.get(delimiter)
assert delimiter, "delimiter must be one of {}".format(DELIMITERS.keys())
csv_settings = dict(include_header=include_header,
compression=compression,
column_delimiter=delimiter,
unquoted=unquoted,
filename_prefix=prefix,
force_multifile=True,
max_file_size=max_file_size)
script_id, run_id = _sql_script(client, sql, database, job_name,
credential_id, hidden,
csv_settings=csv_settings)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
outputs = fut.result()["output"]
if not outputs:
raise EmptyResultError("Unload query {} returned no manifest."
.format(script_id))
buf = io.BytesIO()
civis_to_file(outputs[0]['file_id'], buf, client=client)
txt = io.TextIOWrapper(buf, encoding='utf-8')
txt.seek(0)
unload_manifest = json.load(txt)
return unload_manifest
@deprecate_param('v2.0.0', 'api_key', 'headers')
def dataframe_to_civis(df, database, table, api_key=None, client=None,
max_errors=None, existing_table_rows="fail",
diststyle=None, distkey=None,
sortkey1=None, sortkey2=None,
table_columns=None,
headers=None, credential_id=None,
primary_keys=None, last_modified_keys=None,
execution="immediate",
delimiter=None, polling_interval=None,
archive=False, hidden=True, **kwargs):
"""Upload a `pandas` `DataFrame` into a Civis table.
The `DataFrame`'s index will not be included. To store the index
along with the other values, use `df.reset_index()` instead
of `df` as the first argument to this function.
Parameters
----------
df : :class:`pandas:pandas.DataFrame`
The `DataFrame` to upload to Civis.
database : str or int
Upload data into this database. Can be the database name or ID.
table : str
The schema and table you want to upload to. E.g.,
``'scratch.table'``. Schemas or tablenames with periods must
be double quoted, e.g. ``'scratch."my.table"'``.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
max_errors : int, optional
The maximum number of rows with errors to remove from the import
before failing.
existing_table_rows : str, optional
The behaviour if a table with the requested name already exists.
One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or
``'upsert'``. Defaults to ``'fail'``.
diststyle : str, optional
The distribution style for the table.
One of ``'even'``, ``'all'`` or ``'key'``.
distkey : str, optional
The column to use as the distkey for the table.
sortkey1 : str, optional
The column to use as the sortkey for the table.
sortkey2 : str, optional
The second column in a compound sortkey for the table.
table_columns : list[Dict[str, str]], optional
A list of dictionaries corresponding to the columns in
the source file. Each dictionary should have keys
for column "name" and "sqlType". The import will only copy these
columns regardless if there are more columns in the table.
headers : bool, optional [DEPRECATED]
Whether or not the first row of the file should be treated as
headers. The default, ``None``, attempts to autodetect whether
or not the first row contains headers.
This parameter has no effect in versions >= 1.11 and will be
removed in v2.0. Tables will always be written with column
names read from the DataFrame. Use the `header` parameter
(which will be passed directly to :func:`~pandas.DataFrame.to_csv`)
to modify the column names in the Civis Table.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
primary_keys: list[str], optional
A list of the primary key column(s) of the destination table that
uniquely identify a record. If existing_table_rows is "upsert", this
field is required. Note that this is true regardless of whether the
destination database itself requires a primary key.
last_modified_keys: list[str], optional
A list of the columns indicating a record has been updated. If
existing_table_rows is "upsert", this field is required.
escaped: bool, optional
A boolean value indicating whether or not the source file has quotes
escaped with a backslash. Defaults to false.
execution: string, optional, default "immediate"
One of "delayed" or "immediate". If "immediate", refresh column
statistics as part of the run. If "delayed", flag the table for a
deferred statistics update; column statistics may not be available
for up to 24 hours. In addition, if existing_table_rows is "upsert",
delayed executions move data from staging table to final table after a
brief delay, in order to accommodate multiple concurrent imports to the
same destination table.
polling_interval : int or float, optional
Number of seconds to wait between checks for job completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments will be passed to
:meth:`pandas:pandas.DataFrame.to_csv`.
Returns
-------
fut : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
>>> fut = civis.io.dataframe_to_civis(df, 'my-database',
... 'scratch.df_table')
>>> fut.result()
See Also
--------
:func:`~pandas.DataFrame.to_csv`
"""
if client is None:
client = APIClient(api_key=api_key)
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
headers = False if kwargs.get('header') is False else True
with TemporaryDirectory() as tmp_dir:
tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv')
to_csv_kwargs = {'encoding': 'utf-8', 'index': False}
to_csv_kwargs.update(kwargs)
df.to_csv(tmp_path, **to_csv_kwargs)
_, name = split_schema_tablename(table)
file_id = file_to_civis(tmp_path, name, client=client)
delimiter = ','
fut = civis_file_to_table(file_id, database, table,
client=client, max_errors=max_errors,
existing_table_rows=existing_table_rows,
diststyle=diststyle, distkey=distkey,
sortkey1=sortkey1, sortkey2=sortkey2,
table_columns=table_columns,
delimiter=delimiter, headers=headers,
credential_id=credential_id,
primary_keys=primary_keys,
last_modified_keys=last_modified_keys,
escaped=False, execution=execution,
polling_interval=polling_interval,
hidden=hidden)
return fut
@deprecate_param('v2.0.0', 'api_key')
def csv_to_civis(filename, database, table, api_key=None, client=None,
max_errors=None, existing_table_rows="fail",
diststyle=None, distkey=None,
sortkey1=None, sortkey2=None,
table_columns=None,
delimiter=",", headers=None,
primary_keys=None, last_modified_keys=None,
escaped=False, execution="immediate",
credential_id=None, polling_interval=None, archive=False,
hidden=True):
"""Upload the contents of a local CSV file to Civis.
Parameters
----------
filename : str
Upload the contents of this file.
database : str or int
Upload data into this database. Can be the database name or ID.
table : str
The schema and table you want to upload to. E.g.,
``'scratch.table'``.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
max_errors : int, optional
The maximum number of rows with errors to remove from the import
before failing.
existing_table_rows : str, optional
The behaviour if a table with the requested name already exists.
One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or
``'upsert'``. Defaults to ``'fail'``.
diststyle : str, optional
The distribution style for the table.
One of ``'even'``, ``'all'`` or ``'key'``.
distkey : str, optional
The column to use as the distkey for the table.
sortkey1 : str, optional
The column to use as the sortkey for the table.
sortkey2 : str, optional
The second column in a compound sortkey for the table.
table_columns : list[Dict[str, str]], optional
A list of dictionaries corresponding to the columns in
the source file. Each dictionary should have keys
for column "name" and "sqlType". The import will only copy these
        columns, regardless of whether there are more columns in the table.
delimiter : string, optional
The column delimiter. One of ``','``, ``'\\t'`` or ``'|'``.
headers : bool, optional
Whether or not the first row of the file should be treated as
headers. The default, ``None``, attempts to autodetect whether
or not the first row contains headers.
primary_keys: list[str], optional
A list of the primary key column(s) of the destination table that
uniquely identify a record. If existing_table_rows is "upsert", this
field is required. Note that this is true regardless of whether the
destination database itself requires a primary key.
last_modified_keys: list[str], optional
A list of the columns indicating a record has been updated. If
existing_table_rows is "upsert", this field is required.
escaped: bool, optional
A boolean value indicating whether or not the source file has quotes
escaped with a backslash. Defaults to false.
execution: string, optional, default "immediate"
One of "delayed" or "immediate". If "immediate", refresh column
statistics as part of the run. If "delayed", flag the table for a
deferred statistics update; column statistics may not be available
for up to 24 hours. In addition, if existing_table_rows is "upsert",
delayed executions move data from staging table to final table after a
brief delay, in order to accommodate multiple concurrent imports to the
same destination table.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for job completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
results : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Notes
-----
This reads the contents of `filename` into memory.
Examples
--------
>>> with open('input_file.csv', 'w') as _input:
... _input.write('a,b,c\\n1,2,3')
>>> fut = civis.io.csv_to_civis('input_file.csv',
... 'my-database',
... 'scratch.my_data')
>>> fut.result()
"""
if client is None:
client = APIClient(api_key=api_key)
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
name = path.basename(filename)
with open(filename, "rb") as data:
file_id = file_to_civis(data, name, client=client)
log.debug('Uploaded file %s to Civis file %s', filename, file_id)
fut = civis_file_to_table(file_id, database, table,
client=client, max_errors=max_errors,
existing_table_rows=existing_table_rows,
diststyle=diststyle, distkey=distkey,
sortkey1=sortkey1, sortkey2=sortkey2,
table_columns=table_columns,
delimiter=delimiter, headers=headers,
credential_id=credential_id,
primary_keys=primary_keys,
last_modified_keys=last_modified_keys,
escaped=escaped, execution=execution,
polling_interval=polling_interval,
hidden=hidden)
return fut
@deprecate_param('v2.0.0', 'file_id')
def civis_file_to_table(file_id, database, table, client=None,
max_errors=None, existing_table_rows="fail",
diststyle=None, distkey=None,
sortkey1=None, sortkey2=None,
table_columns=None,
primary_keys=None, last_modified_keys=None,
escaped=False, execution="immediate",
delimiter=None, headers=None,
credential_id=None, polling_interval=None,
hidden=True):
"""Upload the contents of one or more Civis files to a Civis table.
All provided files will be loaded as an atomic unit in parallel, and
should share the same columns in the same order, and be in the same
format.
Parameters
----------
file_id : int or list[int]
Civis file ID or a list of Civis file IDs. Reference by name to this
argument is deprecated, as the name will change in v2.0.0.
database : str or int
Upload data into this database. Can be the database name or ID.
table : str
The schema and table you want to upload to. E.g.,
``'scratch.table'``.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
max_errors : int, optional
The maximum number of rows with errors to remove from the import
before failing. If multiple files are provided, this limit applies
across all files combined.
existing_table_rows : str, optional
The behaviour if a table with the requested name already exists.
One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or
``'upsert'``. Defaults to ``'fail'``.
diststyle : str, optional
The distribution style for the table.
One of ``'even'``, ``'all'`` or ``'key'``.
distkey : str, optional
The column to use as the distkey for the table.
sortkey1 : str, optional
The column to use as the sortkey for the table.
sortkey2 : str, optional
The second column in a compound sortkey for the table.
table_columns : list[Dict[str, str]], optional
A list of dictionaries corresponding to the columns in
the source file. Each dictionary should have keys
for column "name" and "sqlType". The import will only copy these
        columns, regardless of whether there are more columns in the table.
primary_keys: list[str], optional
A list of the primary key column(s) of the destination table that
uniquely identify a record. If existing_table_rows is "upsert", this
field is required. Note that this is true regardless of whether the
destination database itself requires a primary key.
last_modified_keys: list[str], optional
A list of the columns indicating a record has been updated. If
existing_table_rows is "upsert", this field is required.
escaped: bool, optional
A boolean value indicating whether or not the source file(s) escape
quotes with a backslash. Defaults to false.
execution: string, optional, default "immediate"
One of "delayed" or "immediate". If "immediate", refresh column
statistics as part of the run. If "delayed", flag the table for a
deferred statistics update; column statistics may not be available
for up to 24 hours. In addition, if existing_table_rows is "upsert",
delayed executions move data from staging table to final table after a
brief delay, in order to accommodate multiple concurrent imports to the
same destination table.
delimiter : string, optional
The column delimiter. One of ``','``, ``'\\t'`` or ``'|'``. If not
provided, will attempt to auto-detect.
headers : bool, optional
Whether or not the first row of the file should be treated as
headers. The default, ``None``, attempts to autodetect whether
or not the first row contains headers.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for job completion.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
results : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Raises
------
CivisImportError
If multiple files are given and determined to be incompatible for
import. This may be the case if their columns have different types,
their delimiters are different, headers are present in some but not
others, or compressions do not match.
Examples
--------
>>> file_id = 100
>>> fut = civis.io.civis_file_to_table(file_id,
... 'my-database',
... 'scratch.my_data')
>>> fut.result()
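    To import several files as one atomic unit, pass a list of file IDs
    (the IDs below are illustrative placeholders):
    >>> fut = civis.io.civis_file_to_table([100, 101],
    ...                                    'my-database',
    ...                                    'scratch.my_data')
    >>> fut.result()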
"""
if client is None:
client = APIClient()
schema, table = split_schema_tablename(table)
if isinstance(file_id, int):
file_id = [file_id]
if schema is None:
raise ValueError("Provide a schema as part of the `table` input.")
db_id = client.get_database_id(database)
cred_id = credential_id or client.default_credential
if delimiter is not None: # i.e. it was provided as an argument
delimiter = DELIMITERS.get(delimiter)
assert delimiter, "delimiter must be one of {}".format(
DELIMITERS.keys()
)
try:
client.get_table_id(table, database)
log.debug('Table {table} already exists - skipping column '
'detection'.format(table=table))
table_exists = True
except ValueError:
table_exists = False
# Use Preprocess endpoint to get the table columns as needed
# and perform necessary file cleaning
need_table_columns = ((not table_exists or existing_table_rows == 'drop')
and table_columns is None)
cleaning_futures = _run_cleaning(file_id, client, need_table_columns,
headers, delimiter, hidden)
(cleaned_file_ids, headers, compression, delimiter,
cleaned_table_columns) = _process_cleaning_results(
cleaning_futures, client, headers, need_table_columns, delimiter
)
table_columns = table_columns or cleaned_table_columns
source = dict(file_ids=cleaned_file_ids)
destination = dict(schema=schema, table=table, remote_host_id=db_id,
credential_id=cred_id, primary_keys=primary_keys,
last_modified_keys=last_modified_keys)
redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2],
diststyle=diststyle)
# If multiple files are being imported, there might be differences in
# their precisions/lengths - setting this option will allow the Civis API
# to increase these values for the data types provided, and decreases the
# risk of a length-related import failure
loosen_types = len(file_id) > 1
import_name = 'CSV import to {}.{}'.format(schema, table)
import_job = client.imports.post_files_csv(
source,
destination,
headers,
name=import_name,
max_errors=max_errors,
existing_table_rows=existing_table_rows,
column_delimiter=delimiter,
compression=compression,
escaped=escaped,
execution=execution,
loosen_types=loosen_types,
table_columns=table_columns,
redshift_destination_options=redshift_options,
hidden=hidden
)
fut = run_job(import_job.id, client=client,
polling_interval=polling_interval)
log.debug('Started run %d for import %d', fut.run_id, import_job.id)
return fut
def _sql_script(client, sql, database, job_name, credential_id, hidden=False,
csv_settings=None):
job_name = maybe_get_random_name(job_name)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
csv_settings = csv_settings or {}
export_job = client.scripts.post_sql(job_name,
remote_host_id=db_id,
credential_id=credential_id,
sql=sql,
hidden=hidden,
csv_settings=csv_settings)
run_job = client.scripts.post_sql_runs(export_job.id)
log.debug('Started run %d of SQL script %d', run_job.id, export_job.id)
return export_job.id, run_job.id
def _get_sql_select(table, columns=None):
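    # Illustrative examples (not part of the original source):
    #   _get_sql_select('scratch.tbl')             -> "select * from scratch.tbl"
    #   _get_sql_select('scratch.tbl', ['a', 'b']) -> "select a, b from scratch.tbl"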
if columns and not isinstance(columns, (list, tuple)):
raise TypeError("columns must be a list, tuple or None")
select = ", ".join(columns) if columns is not None else "*"
sql = "select {} from {}".format(select, table)
return sql
def _get_headers(client, sql, database, credential_id, polling_interval=None):
headers = None
try:
# use 'begin read only;' to ensure we can't change state
sql = 'begin read only; select * from ({}) limit 1'.format(sql)
fut = query_civis(sql, database, client=client,
credential_id=credential_id,
polling_interval=polling_interval)
headers = fut.result()['result_columns']
except Exception as exc: # NOQA
log.debug("Failed to retrieve headers due to %s", str(exc))
return headers
def _decompress_stream(response, buf, write_bytes=True):
# use response.raw for a more consistent approach
# if content-encoding is specified in the headers
# then response.iter_content will decompress the stream
# however, our use of content-encoding is inconsistent
chunk = response.raw.read(CHUNK_SIZE)
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
while chunk or d.unused_data:
if d.unused_data:
to_decompress = d.unused_data + chunk
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
else:
to_decompress = d.unconsumed_tail + chunk
if write_bytes:
buf.write(d.decompress(to_decompress))
else:
buf.write(d.decompress(to_decompress).decode('utf-8'))
chunk = response.raw.read(CHUNK_SIZE)
def _download_file(url, local_path, headers, compression):
response = requests.get(url, stream=True)
response.raise_for_status()
# gzipped buffers can be concatenated so write headers as gzip
if compression == 'gzip':
with gzip.open(local_path, 'wb') as fout:
fout.write(headers)
with open(local_path, 'ab') as fout:
shutil.copyfileobj(response.raw, fout, CHUNK_SIZE)
# write headers and decompress the stream
elif compression == 'none':
with open(local_path, 'wb') as fout:
fout.write(headers)
_decompress_stream(response, fout)
# decompress the stream, write headers, and zip the file
elif compression == 'zip':
with TemporaryDirectory() as tmp_dir:
tmp_path = path.join(tmp_dir, 'civis_to_csv.csv')
with open(tmp_path, 'wb') as tmp_file:
tmp_file.write(headers)
_decompress_stream(response, tmp_file)
with zipfile.ZipFile(local_path, 'w') as fout:
arcname = path.basename(local_path)
if arcname.split('.')[-1] == 'zip':
arcname = arcname.split('.')[0] + '.csv'
fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED)
def _download_callback(job_id, run_id, filename, headers, compression):
def callback(future):
if not future.succeeded():
return
outputs = future.result().get("output")
if not outputs:
warnings.warn("Job %s, run %s does not have any output to "
"download. Not creating file %s."
% (job_id, run_id, filename),
RuntimeWarning)
return
else:
url = outputs[0]["path"]
file_id = outputs[0]["file_id"]
log.debug('Exported results to Civis file %s', file_id)
return _download_file(url, filename, headers, compression)
return callback
def split_schema_tablename(table):
"""Split a Redshift 'schema.tablename' string
Remember that special characters (such as '.') can only
be included in a schema or table name if delimited by double-quotes.
Parameters
----------
table: str
Either a Redshift schema and table name combined
with a ".", or else a single table name.
Returns
-------
schema, tablename
A 2-tuple of strings. The ``schema`` may be None if the input
is only a table name, but the ``tablename`` will always be filled.
Raises
------
ValueError
If the input ``table`` is not separable into a schema and
table name.
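    Examples
    --------
    Illustrative only:
    >>> split_schema_tablename('scratch.my_table')
    ('scratch', 'my_table')
    >>> split_schema_tablename('my_table')
    (None, 'my_table')
    >>> split_schema_tablename('scratch."my.table"')
    ('scratch', 'my.table')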
"""
reader = csv.reader(StringIO(str(table)),
delimiter=".",
doublequote=True,
quotechar='"')
schema_name_tup = next(reader)
if len(schema_name_tup) == 1:
schema_name_tup = (None, schema_name_tup[0])
if len(schema_name_tup) != 2:
raise ValueError("Cannot parse schema and table. "
"Does '{}' follow the pattern 'schema.table'?"
.format(table))
return tuple(schema_name_tup)
def _replace_null_column_names(column_list):
"""Replace null names in columns from file cleaning with an appropriately
blank column name.
Parameters
----------
column_list: list[dict]
the list of columns from file cleaning.
Returns
--------
column_list: list[dict]
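    Examples
    --------
    Illustrative only:
    >>> _replace_null_column_names([{'name': None, 'sqlType': 'INT'},
    ...                             {'name': 'b', 'sqlType': 'VARCHAR'}])
    [{'name': 'column_0', 'sqlType': 'INT'}, {'name': 'b', 'sqlType': 'VARCHAR'}]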
"""
new_cols = []
for i, col in enumerate(column_list):
# Avoid mutating input arguments
new_col = dict(col)
if new_col.get('name') is None:
new_col['name'] = 'column_{}'.format(i)
new_cols.append(new_col)
return new_cols
def _run_cleaning(file_ids, client, need_table_columns, headers, delimiter,
hidden, polling_interval=None):
cleaning_futures = []
for fid in file_ids:
cleaner_job = client.files.post_preprocess_csv(
file_id=fid,
in_place=False,
detect_table_columns=need_table_columns,
force_character_set_conversion=True,
include_header=headers,
column_delimiter=delimiter,
hidden=hidden
)
cleaning_futures.append(run_job(cleaner_job.id, client=client,
polling_interval=polling_interval))
return cleaning_futures
def _check_all_detected_info(detected_info, headers, delimiter,
compression, output_file_id):
"""Check a single round of cleaning results as compared to provided values.
Parameters
----------
detected_info: Dict[str, Any]
The detected info of the file as returned by the Civis API.
headers: bool
        The provided value for whether or not the file contains headers.
delimiter: str
The provided value for the file delimiter.
compression: str
The provided value for the file compression.
output_file_id: int
The cleaned file's Civis ID. Used for debugging.
Raises
------
CivisImportError
If the values detected on the file do not match their expected
attributes.
"""
if headers != detected_info['includeHeader']:
raise CivisImportError('Mismatch between detected headers - '
'please ensure all imported files either '
'have a header or do not.')
if delimiter != detected_info['columnDelimiter']:
raise CivisImportError('Provided delimiter "{}" does not match '
'detected delimiter for {}: "{}"'.format(
delimiter,
output_file_id,
detected_info["columnDelimiter"])
)
if compression != detected_info['compression']:
raise CivisImportError('Mismatch between detected and provided '
'compressions - provided compression was {}'
' but detected compression {}. Please '
'ensure all imported files have the same '
'compression.'.format(
compression,
detected_info['compression'])
)
def _process_cleaning_results(cleaning_futures, client, headers,
need_table_columns, delimiter):
cleaned_file_ids = []
done, still_going = concurrent.futures.wait(
cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED
)
# Set values from first completed file cleaning - other files will be
# compared to this one. If inconsistencies are detected, raise an error.
first_completed = done.pop()
output_file = client.jobs.list_runs_outputs(
first_completed.job_id,
first_completed.run_id
)[0]
detected_info = client.files.get(output_file.object_id).detected_info
table_columns = (detected_info['tableColumns'] if need_table_columns
else None)
if headers is None:
headers = detected_info['includeHeader']
if delimiter is None:
delimiter = detected_info['columnDelimiter']
compression = detected_info['compression']
_check_all_detected_info(detected_info, headers, delimiter, compression,
output_file.object_id)
cleaned_file_ids.append(output_file.object_id)
# Ensure that all results from files are correctly accounted for -
# Since concurrent.futures.wait returns two sets, it is possible
    # that done contains more than one Future. Thus it is necessary to account
# for these possible completed cleaning runs while waiting on those which
# are still running.
for result in concurrent.futures.as_completed(done | still_going):
output_file = client.jobs.list_runs_outputs(
result.job_id,
result.run_id
)[0]
detected_info = client.files.get(output_file.object_id).detected_info
if need_table_columns:
file_columns = detected_info['tableColumns']
_check_column_types(table_columns, file_columns,
output_file.object_id)
_check_all_detected_info(detected_info, headers, delimiter,
compression, output_file.object_id)
cleaned_file_ids.append(output_file.object_id)
if need_table_columns:
table_columns = _replace_null_column_names(table_columns)
return cleaned_file_ids, headers, compression, delimiter, table_columns
def _check_column_types(table_columns, file_columns, output_obj_id):
"""Check that base column types match those current defined for the table.
Parameters
----------
table_columns: List[Dict[str, str]]
The columns for the table to be created.
file_columns: List[Dict[str, str]]
The columns detected by the Civis API for the file.
output_obj_id: int
The file ID under consideration; used for error messaging.
Raises
------
CivisImportError
If the table columns and the file columns have a type mismatch, or
differ in count.
"""
if len(table_columns) != len(file_columns):
raise CivisImportError('All files should have the same number of '
'columns. Expected {} columns but file {} '
'has {} columns'.format(
len(table_columns),
output_obj_id,
len(file_columns))
)
error_msgs = []
for idx, (tcol, fcol) in enumerate(zip(table_columns, file_columns)):
# for the purposes of type checking, we care only that the types
        # share a base type (e.g. INT, VARCHAR, DECIMAL) rather than that
# they have the same precision and length
# (e.g VARCHAR(42), DECIMAL(8, 10))
tcol_base_type = tcol['sql_type'].split('(', 1)[0]
fcol_base_type = fcol['sql_type'].split('(', 1)[0]
if tcol_base_type != fcol_base_type:
error_msgs.append(
'Column {}: File base type was {}, but expected {}'.format(
idx,
fcol_base_type,
tcol_base_type
)
)
if error_msgs:
raise CivisImportError(
'Encountered the following errors for file {}:\n\t{}'.format(
output_obj_id,
'\n\t'.join(error_msgs)
)
)
| 2.21875 | 2 |
tests/unit/small_text/integrations/pytorch/test_strategies.py | chschroeder/small-text | 218 | 4451 | <reponame>chschroeder/small-text<filename>tests/unit/small_text/integrations/pytorch/test_strategies.py
import unittest
import pytest
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
from small_text.integrations.pytorch.query_strategies import (
BADGE,
ExpectedGradientLength,
ExpectedGradientLengthMaxWord)
except PytorchNotFoundError:
pass
@pytest.mark.pytorch
class BADGETest(unittest.TestCase):
def test_init_default(self):
strategy = BADGE(2)
self.assertEqual(2, strategy.num_classes)
def test_init(self):
strategy = BADGE(4)
self.assertEqual(4, strategy.num_classes)
def test_badge_str(self):
strategy = BADGE(2)
expected_str = 'BADGE(num_classes=2)'
self.assertEqual(expected_str, str(strategy))
@pytest.mark.pytorch
class ExpectedGradientLengthTest(unittest.TestCase):
def test_init_default(self):
strategy = ExpectedGradientLength(2)
self.assertEqual(2, strategy.num_classes)
self.assertEqual(50, strategy.batch_size)
self.assertEqual('cuda', strategy.device)
def test_init(self):
strategy = ExpectedGradientLength(4, batch_size=100, device='cpu')
self.assertEqual(4, strategy.num_classes)
self.assertEqual(100, strategy.batch_size)
self.assertEqual('cpu', strategy.device)
def test_expected_gradient_length_str(self):
strategy = ExpectedGradientLength(2)
expected_str = 'ExpectedGradientLength()'
self.assertEqual(expected_str, str(strategy))
@pytest.mark.pytorch
class ExpectedGradientLengthMaxWordTest(unittest.TestCase):
def test_init_default(self):
strategy = ExpectedGradientLengthMaxWord(2, 'embedding')
self.assertEqual(2, strategy.num_classes)
self.assertEqual(50, strategy.batch_size)
self.assertEqual('cuda', strategy.device)
self.assertEqual('embedding', strategy.layer_name)
def test_init(self):
strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu')
self.assertEqual(4, strategy.num_classes)
self.assertEqual(100, strategy.batch_size)
self.assertEqual('cpu', strategy.device)
self.assertEqual('embedding', strategy.layer_name)
| 2.546875 | 3 |
pymterm/colour/tango.py | stonewell/pymterm | 102 | 4452 | <gh_stars>100-1000
TANGO_PALLETE = [
'2e2e34343636',
'cccc00000000',
'4e4e9a9a0606',
'c4c4a0a00000',
'34346565a4a4',
'757550507b7b',
'060698989a9a',
'd3d3d7d7cfcf',
'555557575353',
'efef29292929',
'8a8ae2e23434',
'fcfce9e94f4f',
'72729f9fcfcf',
'adad7f7fa8a8',
'3434e2e2e2e2',
'eeeeeeeeecec',
]
def parse_tango_color(c):
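    # Illustrative note: each palette entry is 12 hex digits (rrrrggggbbbb) and
    # only the high byte of each channel is kept, e.g.
    # parse_tango_color('2e2e34343636') -> [0x2e, 0x34, 0x36, 0xFF].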
r = int(c[:4][:2], 16)
g = int(c[4:8][:2], 16)
b = int(c[8:][:2], 16)
return [r, g, b, 0xFF]
def apply_color(cfg, color_table):
cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec')
cfg.default_background_color = parse_tango_color('323232323232')
cfg.default_cursor_color = cfg.default_foreground_color
for i in range(len(TANGO_PALLETE)):
if i < len(color_table):
color_table[i] = parse_tango_color(TANGO_PALLETE[i])
| 2.1875 | 2 |
user_manager/oauth/oauth2.py | voegtlel/auth-manager-backend | 0 | 4453 | from datetime import datetime, timedelta
from enum import Enum
from typing import List, Optional, Tuple, Dict, Any, Union
import time
from authlib.common.security import generate_token
from authlib.consts import default_json_headers
from authlib.oauth2 import (
OAuth2Request,
AuthorizationServer as _AuthorizationServer,
ResourceProtector as _ResourceProtector,
OAuth2Error,
HttpRequest,
)
from authlib.oauth2.rfc6749 import InvalidClientError
from authlib.oauth2.rfc6749.grants import (
AuthorizationCodeGrant as _AuthorizationCodeGrant,
RefreshTokenGrant as _RefreshTokenGrant,
BaseGrant,
)
from authlib.oauth2.rfc6749.grants import (
ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant,
)
from authlib.oauth2.rfc6749.util import scope_to_list
from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \
InsufficientScopeError
from authlib.oauth2.rfc8414 import AuthorizationServerMetadata
from authlib.oidc.core import UserInfo
from authlib.oidc.core.grants import (
OpenIDCode as _OpenIDCode,
OpenIDImplicitGrant as _OpenIDImplicitGrant,
OpenIDHybridGrant as _OpenIDHybridGrant,
)
from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token
from fastapi import HTTPException
from starlette.concurrency import run_in_threadpool
from starlette.responses import Response, JSONResponse
from user_manager.common.config import config
from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \
UserPropertyType
from user_manager.common.mongo import authorization_code_collection, token_collection, \
client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \
async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema
from . import oauth2_key
from .user_helper import UserWithRoles
USERS_SCOPE = '*users'
class TypedRequest(OAuth2Request):
user: UserWithRoles
credential: Union[DbAuthorizationCode, DbToken]
client: DbClient
class RedirectResponse(Response):
def to_json_response(self) -> JSONResponse:
return JSONResponse(
content={'redirect_uri': self.headers['Location']},
status_code=200,
headers=dict(default_json_headers),
)
class ErrorJSONResponse(JSONResponse):
pass
class ErrorRedirectResponse(RedirectResponse):
def to_json_response(self) -> JSONResponse:
return ErrorJSONResponse(
content={'redirect_uri': self.headers['Location']},
status_code=401,
headers=dict(default_json_headers),
)
class AuthorizationServer(_AuthorizationServer):
metadata_class = AuthorizationServerMetadata
def create_oauth2_request(self, request: TypedRequest):
assert isinstance(request, OAuth2Request)
return request
def create_json_request(self, request):
assert isinstance(request, HttpRequest)
raise NotImplementedError()
# TODO: Create HttpRequest with json in body.
def handle_response(self, status_code: int, payload: Optional[dict], headers: List[Tuple[str, str]]):
headers = dict(headers)
if isinstance(payload, dict):
return JSONResponse(payload, status_code=status_code, headers=headers)
elif headers.get('Location'):
assert not payload
return RedirectResponse(status_code=status_code, headers=headers)
assert False
def handle_error_response(self, request: TypedRequest, error: OAuth2Error):
status_code, body, headers = error(
translations=self.get_translations(request),
error_uris=self.get_error_uris(request)
)
headers = dict(headers)
if isinstance(body, dict):
return ErrorJSONResponse(
content=body,
status_code=status_code,
headers=headers,
)
elif headers.get('Location'):
assert not body
return ErrorRedirectResponse(
status_code=status_code,
headers=headers,
)
assert False
def save_authorization_code(code: str, request: TypedRequest):
nonce = request.data.get('nonce')
item = DbAuthorizationCode(
code=code,
client_id=request.client.id,
redirect_uri=request.redirect_uri,
scope=request.scope,
user_id=request.user.user.id,
nonce=nonce,
auth_time=int(time.time()),
expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code),
)
authorization_code_collection.insert_one(item.document())
return item
class ExistsNonceMixin(object):
def exists_nonce(self, nonce: str, request: TypedRequest):
# exists = mongo.authorization_code_collection.count_documents(
# {'client_id': request.client_id, 'nonce': nonce},
# limit=1,
# )
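        # Atomically "consume" the nonce: the update only matches a document
        # whose nonce is still set, so a replayed nonce fails the check below.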
mod_result = authorization_code_collection.update_one(
{'client_id': request.client_id, 'nonce': nonce},
{'$set': {'nonce': None}},
)
if mod_result.modified_count != 1:
return False
return True
class JwtConfigMixin(object):
jwt_token_expiration: int
def get_jwt_config(self, *args, **kwargs):
return {
'key': oauth2_key.key.key,
'alg': oauth2_key.key.jwk.alg.value,
'iss': config.oauth2.issuer,
'exp': self.jwt_token_expiration,
}
class UserInfoMixin(object):
def _translate_properties(
self,
scope: str,
schema: DbManagerSchema,
) -> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]:
scope_list = ['*'] + scope_to_list(scope)
return [
(prop.valid_key, schema.properties_by_key[prop.user_property], prop.group_type, prop.group_by_name)
for scope_name in scope_list
if scope_name not in ('openid', 'offline_access') and scope_name in schema.scopes_by_key
for prop in schema.scopes_by_key[scope_name].properties
if prop.user_property in schema.properties_by_key
]
def generate_user_info(self, user: UserWithRoles, scope: str):
user_data = {
'roles': user.roles,
}
for key, prop, group_type, group_by_name in self._translate_properties(scope, read_schema()):
if not hasattr(user.user, prop.key):
continue
value = getattr(user.user, prop.key, None)
if prop.type == UserPropertyType.picture:
if value is not None:
value = f"{config.oauth2.base_url}/picture/{value}"
elif prop.type == UserPropertyType.groups:
group_filter = {} if group_type is None else {'group_type': group_type}
value = [
group['group_name'] if group_by_name else group['_id']
for group in user_group_collection.find(
{'_id': {'$in': value}, 'visible': True, **group_filter},
projection={'group_name' if group_by_name else '_id': 1}
)
]
elif prop.type in (
UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
):
continue
user_data[key] = value
return UserInfo(**user_data)
async def async_generate_user_info(self, user: UserWithRoles, scope: str):
user_data = {
'roles': user.roles,
}
for key, prop, group_type, group_by_name in self._translate_properties(scope, await async_read_schema()):
if not hasattr(user.user, prop.key):
continue
value = getattr(user.user, prop.key, None)
if prop.type == UserPropertyType.picture:
if value is not None:
value = f"{config.oauth2.base_url}/picture/{value}"
elif prop.type == UserPropertyType.groups:
group_filter = {} if group_type is None else {'group_type': group_type}
value = [
group['group_name'] if group_by_name else group['_id']
async for group in async_user_group_collection.find(
{'_id': {'$in': value}, 'visible': True, **group_filter},
projection={'group_name' if group_by_name else '_id': 1}
)
]
elif prop.type in (
UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
):
continue
user_data[key] = value
return UserInfo(**user_data)
class AuthorizationCodeGrant(_AuthorizationCodeGrant):
TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic', 'client_secret_post']
AUTHORIZATION_CODE_LENGTH = config.oauth2.authorization_code_length
def save_authorization_code(self, code: str, request: TypedRequest):
return save_authorization_code(code, request)
def query_authorization_code(self, code: str, client: DbClient):
auth_code_data = authorization_code_collection.find_one({'_id': code, 'client_id': client.id})
if auth_code_data is None:
return None
auth_code = DbAuthorizationCode.validate_document(auth_code_data)
if auth_code.is_expired():
return None
return auth_code
def delete_authorization_code(self, authorization_code: DbAuthorizationCode):
authorization_code_collection.delete_one({'_id': authorization_code.code})
def authenticate_user(self, authorization_code: DbAuthorizationCode):
return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id)
class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant):
def authenticate_token_endpoint_client(self):
# Must override this to set the client in the request, to make it available to authenticate_user
        client = super().authenticate_token_endpoint_client()
self.request.client = client
return client
def authenticate_user(self, username: str, password: str):
user_data = user_collection.find_one({'email': username, 'access_tokens.token': password, 'active': True})
if user_data is None:
return None
return UserWithRoles.load_groups(DbUser.validate_document(user_data), self.client.id)
class OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode):
jwt_token_expiration = config.oauth2.token_expiration.authorization_code
class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDImplicitGrant):
jwt_token_expiration = config.oauth2.token_expiration.implicit
class OpenIDHybridGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDHybridGrant):
jwt_token_expiration = config.oauth2.token_expiration.implicit
def generate_authorization_code(self) -> str:
return generate_token(config.oauth2.authorization_code_length)
def save_authorization_code(self, code: str, request: TypedRequest):
return save_authorization_code(code, request)
class RefreshTokenGrant(_RefreshTokenGrant):
TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic']
INCLUDE_NEW_REFRESH_TOKEN = True
def authenticate_refresh_token(self, refresh_token: str):
token_data = token_collection.find_one({'refresh_token': refresh_token})
if token_data is None:
return None
auth_code = DbToken.validate_document(token_data)
if auth_code.is_expired():
return None
return auth_code
def authenticate_user(self, credential: DbToken):
return UserWithRoles.load(credential.user_id, credential.client_id)
def revoke_old_credential(self, credential: DbToken):
# token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
token_collection.delete_one({'_id': credential.access_token})
def save_token(token: Dict[str, Any], request: TypedRequest):
if request.user:
user_id = request.user.user.id
else:
user_id = None
now = int(time.time())
token_data = DbToken.validate_document({
'client_id': request.client.id,
'user_id': user_id,
'issued_at': now,
'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)),
'scope': request.scope,
'auth_time': request.credential.get_auth_time(),
**token
})
token_collection.insert_one(token_data.document())
return token_data
def query_client(client_id: str):
client_data = client_collection.find_one({'_id': client_id})
if client_data is None:
return None
return DbClient.validate_document(client_data)
async def async_query_client(client_id: str):
client_data = await async_client_collection.find_one({'_id': client_id})
if client_data is None:
return None
return DbClient.validate_document(client_data)
def token_generator(*_):
return generate_token(config.oauth2.token_length)
class AccessTokenGenerator(UserInfoMixin, JwtConfigMixin):
jwt_token_expiration = config.oauth2.token_expiration.authorization_code
def __call__(self, client: DbClient, grant_type: str, user: UserWithRoles, scope: str):
jwt_config = self.get_jwt_config()
jwt_config['aud'] = [client.get_client_id()]
jwt_config['auth_time'] = int(time.time())
user_info = {'sub': user.user.id, 'roles': user.roles}
if 'groups' in scope_to_list(scope):
user_info['groups'] = user.user.groups
return generate_id_token({}, user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config)
def token_expires_in(_, grant_type: str):
return getattr(config.oauth2.token_expiration, grant_type)
class BearerToken(_BearerToken):
def __call__(self, client, grant_type, user=None, scope=None,
expires_in=None, include_refresh_token=True):
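        # Only issue a refresh token when the 'offline_access' scope was
        # requested (the OpenID Connect convention for long-lived sessions).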
if 'offline_access' not in scope_to_list(scope):
include_refresh_token = False
return super(BearerToken, self).__call__(client, grant_type, user, scope, expires_in, include_refresh_token)
authorization = AuthorizationServer(
query_client,
save_token,
BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator),
)
class OpenIDSessionState:
def __call__(self, grant: BaseGrant):
grant.register_hook('process_token', self.process_token)
def process_token(self, grant: BaseGrant, token: dict):
scope = token.get('scope')
if not scope or not is_openid_scope(scope):
# standard authorization code flow
return token
token['session_state'] = str(grant.request.user.last_modified)
return token
# support all openid grants
authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(OpenIDImplicitGrant)
authorization.register_grant(OpenIDHybridGrant)
authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(ResourceOwnerPasswordCredentialsGrant)
class BearerTokenValidator(_BearerTokenValidator):
def authenticate_token(self, token_string: str):
token_data = token_collection.find_one({'_id': token_string})
if token_data is None:
return None
token = DbToken.validate_document(token_data)
if client_user_cache_collection.count_documents({
'client_id': token.client_id,
'user_id': token.user_id,
}) != 1:
return None
return token
def request_invalid(self, request: TypedRequest):
return False
def token_revoked(self, token: DbToken):
return token.revoked
class ResourceProtector(_ResourceProtector):
def validate(self, request: OAuth2Request, scope: str = None, scope_operator='AND') -> DbToken:
assert isinstance(request, OAuth2Request)
return self.validate_request(scope, request, scope_operator)
class UserIntrospection(UserInfoMixin):
async def create_response(self, request: TypedRequest) -> Response:
try:
assert isinstance(request, OAuth2Request)
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
request.user = await UserWithRoles.async_load(request.token.user_id, request.token.client_id)
user_info = await self.async_generate_user_info(request.user, request.token.scope)
return JSONResponse(user_info)
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class RequestOriginVerifier:
async def create_response(self, request: TypedRequest, origin: str) -> Optional[Response]:
try:
assert isinstance(request, OAuth2Request)
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
request.client = await async_query_client(request.token.client_id)
if request.client is None:
raise HTTPException(403, "Invalid client in token")
if not request.client.check_redirect_uri(origin):
raise HTTPException(403, "Allowed redirect uri does not match request")
return None
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class OtherUserInspection(UserInfoMixin):
async def create_response(self, request: TypedRequest, user_id: str, client_auth: dict = None) -> Response:
try:
assert isinstance(request, OAuth2Request)
if request.client is None:
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
client_id = request.token.client_id
scopes = request.token.scope
scope = USERS_SCOPE
else:
client_id = request.client_id
scopes = request.client.allowed_scope
scope = scopes
if USERS_SCOPE not in scope_to_list(scopes):
raise InsufficientScopeError('Missing "*users" scope', request.uri)
user = await UserWithRoles.async_load(user_id, client_id)
if user is None:
raise HTTPException(404, "User not found")
user_info = await self.async_generate_user_info(user, scope)
return JSONResponse(user_info)
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class OtherUsersInspection(UserInfoMixin):
async def create_response(self, request: TypedRequest) -> Response:
try:
assert isinstance(request, OAuth2Request)
if request.client is None:
request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
if request.token is None:
raise HTTPException(403, "Invalid token")
client_id = request.token.client_id
scopes = request.token.scope
scope = USERS_SCOPE
load_roles = False
else:
client_id = request.client_id
scopes = request.client.allowed_scope
scope = scopes
load_roles = True
if USERS_SCOPE not in scope_to_list(scopes):
raise InsufficientScopeError('Missing "*users" scope', request.uri)
user_infos = []
for user in await UserWithRoles.async_load_all(client_id, load_roles=load_roles):
user_info = await self.async_generate_user_info(user, scope)
if not load_roles:
del user_info['roles']
user_infos.append(user_info)
return JSONResponse(user_infos)
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
class TypeHint(str, Enum):
AccessToken = "access_token"
RefreshToken = "refresh_token"
class RevocationEndpoint:
async def create_response(
self, raw_token: str, token_type_hint: Optional[TypeHint], request: TypedRequest
) -> Response:
token_data = None
if token_type_hint is None or token_type_hint == TypeHint.AccessToken:
token_data = await async_token_collection.find_one({'_id': raw_token})
if token_data is None and (token_type_hint is None or token_type_hint == TypeHint.RefreshToken):
token_data = await async_token_collection.find_one({'refresh_token': raw_token})
if token_data is None:
return Response()
token = DbToken.validate_document(token_data)
try:
if request.client_id is None:
request.data['client_id'] = token.client_id
elif token.client_id != request.client_id:
raise InvalidClientError(state=request.state, status_code=401)
await run_in_threadpool(
authorization.authenticate_client, request, ["none", "client_secret_basic", "client_secret_post"]
)
# await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}})
# token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
await async_token_collection.delete_one({'_id': token.access_token})
return Response()
except OAuth2Error as error:
return authorization.handle_error_response(request, error)
resource_protector = ResourceProtector()
resource_protector.register_token_validator(BearerTokenValidator())
user_introspection = UserIntrospection()
token_revocation = RevocationEndpoint()
request_origin_verifier = RequestOriginVerifier()
other_user_inspection = OtherUserInspection()
other_users_inspection = OtherUsersInspection()
| 1.421875 | 1 |
src/adsb/sbs/server.py | claws/adsb | 7 | 4454 | <reponame>claws/adsb
import asyncio
import datetime
import logging
import socket
from . import protocol
from typing import Tuple
from asyncio import AbstractEventLoop
logger = logging.getLogger(__name__)
class Server(object):
def __init__(
self,
host: str = "localhost",
port: int = 30003,
backlog=100,
loop: AbstractEventLoop = None,
) -> None:
self.loop = loop or asyncio.get_event_loop()
self.host = host
self._requested_port = port
self.port = None
self.backlog = backlog
self.listener = None
self.protocols = {}
async def start(self) -> None:
""" Start the server """
try:
self.listener = await self.loop.create_server(
lambda: protocol.SBSServerProtocol(self),
self.host,
self._requested_port,
family=socket.AF_INET,
backlog=self.backlog,
) # type: asyncio.Server
# Fetch actual port in use. This can be different from the
# specified port if the port was passed as 0 which means use
# an ephemeral port.
assert len(self.listener.sockets) == 1
_, self.port = self.listener.sockets[0].getsockname()
except asyncio.CancelledError:
logger.exception("Connection waiter Future was cancelled")
except Exception:
logger.exception("An error occurred in start")
async def stop(self) -> None:
""" Stop the server """
if self.listener:
# Avoid iterating over the protocols dict which may change size
            # while it is being iterated over.
peers = list(self.protocols)
for peer in peers:
prot = self.protocols.get(peer)
if prot:
prot.close()
self.listener.close()
def register_protocol(
self, peer: Tuple[str, int], prot: "SBSServerProtocol"
) -> None:
""" Register a protocol instance with the server.
:param peer: Tuple of (host:str, port:int).
:param prot: a SBSServerProtocol instance.
"""
self.protocols[peer] = prot
def deregister_protocol(self, peer: Tuple[str, int]) -> None:
""" De-register a protocol instance from the server.
This peer will no longer receive messages.
:param peer: Tuple of (host:str, port:int).
"""
del self.protocols[peer]
def send_message(self, msg: bytes, peer: Tuple[str, int] = None) -> None:
""" Send a message.
:param msg: A bytes object representing the SBS format message to
send to peers. The message is assumed to include the end of
message delimiter.
:param peer: A specific peer to send the message to. Peer is a
Tuple of (host:str, port:int). If not specified then the message
is broadcast to all peers.
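        Example (illustrative; ``sbs_msg`` is a placeholder bytes object that
        already ends with the SBS message delimiter):
        >>> server.send_message(sbs_msg)  # broadcast to every connected peer
        >>> server.send_message(sbs_msg, peer=('127.0.0.1', 30004))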
"""
if self.protocols:
if peer:
prot = self.protocols.get(peer)
if prot:
prot.send_message(msg)
else:
raise Exception(
f"Server can't send msg to non-existant peer: {peer}"
)
else:
# broadcast message to all peers
for peer, prot in self.protocols.items():
prot.send_message(msg)
else:
raise Exception("Server can't send msg, no peers available")
| 2.53125 | 3 |
src/robusta/core/model/events.py | kandahk/robusta | 0 | 4455 | <gh_stars>0
import logging
import uuid
from enum import Enum
from typing import List, Optional, Dict, Any
from dataclasses import dataclass, field
from pydantic import BaseModel
from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler
from ..reporting.base import Finding, BaseBlock
class EventType(Enum):
KUBERNETES_TOPOLOGY_CHANGE = 1
PROMETHEUS = 2
MANUAL_TRIGGER = 3
SCHEDULED_TRIGGER = 4
class ExecutionEventBaseParams(BaseModel):
named_sinks: Optional[List[str]] = None
# Right now:
# 1. this is a dataclass but we need to make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/
# 2. this can't be a pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557)
# once the pydantic PR that addresses those issues is merged, this should be a pydantic class
# (note that we need to integrate with dataclasses because of hikaru)
@dataclass
class ExecutionBaseEvent:
findings: Dict[str, Finding] = field(default_factory=lambda: {})
named_sinks: Optional[List[str]] = None
response: Dict[
str, Any
] = None # Response returned to caller. For admission or manual triggers for example
stop_processing: bool = False
_scheduler: Optional[PlaybooksScheduler] = None
def set_scheduler(self, scheduler: PlaybooksScheduler):
self._scheduler = scheduler
def get_scheduler(self) -> PlaybooksScheduler:
return self._scheduler
def create_default_finding(self) -> Finding:
"""Create finding default fields according to the event type"""
return Finding(title="Generic Finding", aggregation_key="Generic finding key")
def add_enrichment(
self,
enrichment_blocks: List[BaseBlock],
annotations=None,
finding_key: str = "DEFAULT",
):
finding = self.findings.get(finding_key)
if not finding:
finding = self.create_default_finding()
self.findings[finding_key] = finding
finding.add_enrichment(enrichment_blocks, annotations)
def add_finding(self, finding: Finding, finding_key: str = None):
if (
not finding_key
): # user didn't specify a key, so this finding shouldn't be accessed by key. Randomise it
finding_key = str(uuid.uuid4())
existing_finding = self.findings.get(finding_key)
if existing_finding:
logging.warning(
f"Overriding existing finding. finding_key: {finding_key} new finding: {finding}"
)
self.findings[finding_key] = finding
@staticmethod
def from_params(params: ExecutionEventBaseParams) -> Optional["ExecutionBaseEvent"]:
return ExecutionBaseEvent(named_sinks=params.named_sinks)
| 2.125 | 2 |
examples/django_mongoengine/bike/models.py | pfrantz/graphene-mongo | 260 | 4456 | <filename>examples/django_mongoengine/bike/models.py
from mongoengine import Document
from mongoengine.fields import (
FloatField,
StringField,
ListField,
URLField,
ObjectIdField,
)
class Shop(Document):
meta = {"collection": "shop"}
ID = ObjectIdField()
name = StringField()
address = StringField()
website = URLField()
class Bike(Document):
meta = {"collection": "bike"}
ID = ObjectIdField()
name = StringField()
brand = StringField()
year = StringField()
size = ListField(StringField())
wheel_size = FloatField()
type = StringField()
| 2.53125 | 3 |
src/tensor/tensor/movement/__init__.py | jedhsu/tensor | 0 | 4457 | from ._movement import Movement
from .path import MovementPath
from .paths import MovementPaths
| 1.179688 | 1 |
uhd_restpy/testplatform/sessions/ixnetwork/impairment/profile/fixedclassifier/fixedclassifier.py | OpenIxia/ixnetwork_restpy | 20 | 4458 | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class FixedClassifier(Base):
"""Specifies the packets to apply this profile to. If there are multiple patterns enabled, they are ANDed: each packet must match all packets in order to be impaired by this profile.
The FixedClassifier class encapsulates a list of fixedClassifier resources that are managed by the user.
A list of resources can be retrieved from the server using the FixedClassifier.find() method.
The list can be managed by using the FixedClassifier.add() and FixedClassifier.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'fixedClassifier'
_SDM_ATT_MAP = {
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(FixedClassifier, self).__init__(parent, list_op)
@property
def Pattern(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern
if self._properties.get('Pattern', None) is not None:
return self._properties.get('Pattern')
else:
return Pattern(self)
def add(self):
"""Adds a new fixedClassifier resource on the server and adds it to the container.
Returns
-------
- self: This instance with all currently retrieved fixedClassifier resources using find and the newly added fixedClassifier resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained fixedClassifier resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self):
"""Finds and retrieves fixedClassifier resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve fixedClassifier resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all fixedClassifier resources from the server.
Returns
-------
- self: This instance with matching fixedClassifier resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of fixedClassifier data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the fixedClassifier resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| 1.59375 | 2 |
lantz/drivers/sacher/Sacher_EPOS.py | mtsolmn/lantz-drivers | 4 | 4459 | # sacher_epos.py, python wrapper for sacher epos motor
# <NAME> <<EMAIL>>, August 2014
#
"""
Possibly Maxon EPOS now
"""
"""
This is the actual version that works
But only in the lab32 virtual environment
"""
# from instrument import Instrument
# import qt
import ctypes
import ctypes.wintypes
import logging
import time
# from instrument import Instrument
from ctypes.wintypes import DWORD, WORD
import numpy as np
"""
okay so we import a bunch of random stuff
I always forget what ctypes is for but I'll worry about it later
"""
# from subprocess import Popen, PIPE
# from multiprocessing.managers import BaseManager
# import atexit
# import os
# python32_dir = "C:\\Users\\Alex\\Miniconda3\\envs\\lab32"
# assert os.path.isdir(python32_dir)
# os.chdir(python32_dir)
# derp = "C:\\Users\\Alex\\Documents\\wow_such_code"
# assert os.path.isdir(derp)
# os.chdir(derp)
# p = Popen([python32_dir + "\\python.exe", derp + "\\delegate.py"], stdout=PIPE, cwd=derp)
# atexit.register(p.terminate)
# port = int(p.stdout.readline())
# authkey = p.stdout.read()
# print(port, authkey)
# m = BaseManager(address=("localhost", port), authkey=authkey)
# m.connect()
# tell manager to expect an attribute called LibC
# m.register("SacherLasaTeknique")
# access and use libc
# libc = m.SacherLasaTeknique()
# print(libc.vcs())
# eposlib = ctypes.windll.eposcmd
eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll')
DeviceName = b'EPOS'
ProtocolStackName = b'MAXON_RS232'
InterfaceName = b'RS232'
"""
Max on
Max off
but anyway it looks like ctypes is the thing that's talking to the epos dll
"""
HISTCHAN = 65536
TTREADMAX = 131072
RANGES = 8
MODE_HIST = 0
MODE_T2 = 2
MODE_T3 = 3
FLAG_OVERFLOW = 0x0040
FLAG_FIFOFULL = 0x0003
# in mV
ZCMIN = 0
ZCMAX = 20
DISCRMIN = 0
DISCRMAX = 800
# in ps
OFFSETMIN = 0
OFFSETMAX = 1000000000
# in ms
ACQTMIN = 1
ACQTMAX = 10 * 60 * 60 * 1000
# in mV
PHR800LVMIN = -1600
PHR800LVMAX = 2400
"""
wooooooo a bunch a variables and none of them are explained
way to go dc you da real champ
"""
class Sacher_EPOS():
"""
ok before I dive into this giant Sacher class thing let me just list here all the functions that are being defined in this class:
check(self)
before
wreck(self)
ok but actually:
__init__(self, name, address, reset=False)
__del__(self)
get_bit(self, byteval,idx)
_u32todouble(self, uinput)
open(self)
close(self)
get_offset(self)
fine_tuning_steps(self, steps)
set_new_offset(self, new_offset)
get_motor_position(self)
set_target_position(self, target, absolute, immediately)
do_get_wavelength(self)
do_set_wavelength(self, wavelength)
is_open(self)
clear_fault(self)
initialize(self)
The last one is really long
And also damn there are 16 of them
I'll comment about them as I go through them
"""
def __init__(self, name, address, reset=False):
# Instrument.__init__(self, name, tags=['physical'])
# self._port_name = str(address)
self._port_name = address
self._is_open = False
self._HPM = True
# self.add_parameter('wavelength',
# flags = Instrument.FLAG_GETSET,
# type = types.FloatType,
# units = 'nm',
# minval=1070.0,maxval=1180.0)
# self.add_function('open')
# self.add_function('close')
# self.add_function('fine_tuning_steps')
# self.add_function('get_motor_position')
# self.add_function('set_target_position')
# try:
self.open()
self.initialize()
# except:
# logging.error('Error loading Sacher EPOS motor. In use?')
"""
I mean to me this really seems like the initialize function
so I wonder what initialize(self) is doing
At any rate there doesn't seem to be a lot going on here
"""
def __del__(self):
# execute disconnect
self.close()
return
"""
this might be the only self explanatory one
it disconnects
"""
@staticmethod
def get_bit(byteval, idx):
# def get_bit(self, byteval,idx):
return ((byteval & (1 << idx)) != 0)
"""
you get the bits, and then you use them
but honestly I don't really get what this is doing
sudo git a_clue
"""
@staticmethod
def _u32todouble(uinput):
# def _u32todouble(self, uinput):
# this function implements the really weird/non-standard U32 to
# floating point conversion in the sacher VIs
# get sign of number
sign = Sacher_EPOS.get_bit(uinput, 31)
if sign == False:
mantissa_sign = 1
elif sign == True:
mantissa_sign = -1
exp_mask = 0b111111
# print 'uin u is %d' % uinput
# print 'type uin %s' % type(uinput)
# print 'binary input is %s' % bin(long(uinput))
# get sign of exponent
if Sacher_EPOS.get_bit(uinput, 7) == False:
exp_sign = 1
elif Sacher_EPOS.get_bit(uinput, 7) == True:
exp_sign = -1
# print 'exp extract %s' % bin(int(uinput & exp_mask))
# print 'exp conv %s' % (exp_sign*int(uinput & exp_mask))
# print 'sign of exponent %s' % self.get_bit(uinput,7)
# print 'binary constant is %s' % bin(int(0b10000000000000000000000000000000))
mantissa_mask = 0b01111111111111111111111100000000
# mantissa_mask = 0b0111111111111111111111110000000
# print 'mantissa extract is %s' % bin((uinput & mantissa_mask) >> 8)
mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
# print 'mantissa is %.12f' % mantissa
# print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1 if Sacher_EPOS.get_bit(uinput,7) else 0, uinput & exp_mask)
output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))
# print 'output is %s' % output
return output
"""
ok dc gave some slight explanations here
Apparently there's a "really weird/non-standard U32 to floating point conversion in the sacher VIs"
It'd be gr8 if I knew what U32's were
unsigned 32 bit something something?
ah whatever
I'll have to worry about this later
"""
@staticmethod
def _doubletou32(dinput):
mantissa_bit = 0 if int(dinput / abs(dinput)) > 0 else 1
exp_bit = 1 if -1 < dinput < 1 else 0
b = np.ceil(np.log10(abs(dinput)))
a = dinput / 10 ** b
if dinput < 0:
a = -a
# print('a:\t{}\tb:\t{}'.format(a, b))
d = np.log2(10) * b
d_ = np.ceil(d)
c = a * 2 ** (d - d_)
# print('c:\t{}\td_:{}\toriginal:\t{}'.format(c, d_, c * 2 ** d_))
return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))
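    # _doubletou32 is intended as the inverse of _u32todouble: round-tripping a
    # coefficient through both should reproduce it to within the ~1e-6 mantissa
    # quantisation of the encoding.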
def open(self):
eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
ctypes.POINTER(DWORD)]
eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.HANDLE()
# print 'types are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name), type(buf))
ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf)
self._keyhandle = ret
# print 'keyhandle is %s' % self._keyhandle
# print 'open device ret %s' % buf
# print 'printing'
# print buf.contents.value
# print 'done printer'
if int(buf.contents.value) >= 0:
self._is_open = True
self._keyhandle = ret
return
"""
I have absolutely no idea what the hell this is doing
Considering that close(self) is apparently closing the EPOS motor, maybe this is opening it
"""
def close(self):
print('closing EPOS motor.')
eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.BOOL()
ret = eposlib.VCS_CloseDevice(self._keyhandle, buf)
# print 'close device returned %s' % buf
if int(buf.contents.value) >= 0:
self._is_open = False
else:
logging.error(__name__ + ' did not close Sacher EPOS motor correctly.')
return
"""
Apparently this closes the EPOS motor
I don't know what "opening" and "closing" the motor means though
and yeah also these random variables don't make any sense to me
"""
def get_motor_current(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL
motorCurrent = ctypes.c_uint8(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf))
return motorCurrent.value
"""
Not sure what this is doing yet
"""
def find_home(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf))
print('Homing: {}'.format(ret))
return ret
"""
Not sure what this is doing yet
"""
def restore(self):
nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_Restore.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                        ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_Restore.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))
print('Restore: {}'.format(ret))
return ret
"""
Not sure what this is doing yet
"""
def get_offset(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32))
if ret == 0:
logging.error(__name__ + ' Could not read stored position from Sacher EPOS motor')
return CastedObjectData[0]
"""
Not sure what this is doing yet
"""
def fine_tuning_steps(self, steps):
current_motor_pos = self.get_motor_position()
self._offset = self.get_offset()
self.set_target_position(steps, False, True)
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print 'new offset is %s' % (new_motor_pos-current_motor_pos+self._offset)
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
"""
Not sure what this is doing yet
"""
def set_new_offset(self, new_offset):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
Not sure what this is doing yet
"""
def set_coeffs(self, a, b, c, min_wl, max_wl):
print('')
print("setting coefficients...")
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
d = (min_wl << 16) + max_wl
StoredPositionObject = ctypes.wintypes.WORD(8204)
for subidx, coeff in enumerate([a, b, c]):
print(subidx, coeff)
StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff))
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(d)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
Not sure what this is doing yet
"""
def get_motor_position(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
pPosition = ctypes.pointer(ctypes.c_long())
eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))
# print 'get motor position ret %s' % ret
# print 'get motor position buf %s' % buf.value
# print 'get motor position value %s' % pPosition.contents.value
return pPosition.contents.value
# print('getting motor position...')
# print(ret)
# return print(pPosition.contents.value)
"""
Not sure what this is doing yet
"""
def set_target_position(self, target, absolute, immediately):
# print('check #1')
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# First, set enabled state
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('Enable state ret %s buf %s' % (ret, buf.value))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
pTarget = ctypes.c_long(target)
pAbsolute = ctypes.wintypes.BOOL(absolute)
pImmediately = ctypes.wintypes.BOOL(immediately)
eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
# print('check #2')
# print('About to set motor position')
# print('Current motor position is %d' % (self.get_motor_position()))
ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
steps_per_second = 14494.0 # hardcoded, estimated roughly, unused now
nchecks = 0
# print('check #3')
while nchecks < 1000:
# get the movement state. a movement state of 1 indicates the motor
# is done moving
# print('')
# print('check #4')
# print('Motor current: {}'.format(self.get_motor_current()))
print('Motor position: {}'.format(self.get_motor_position()))
# print('Motor offset: {}'.format(self.get_offset()))
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
# print(pMovementState.contents.value)
eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.BOOL),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
# print('Getting movement state')
ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
# print('Movement state is %s' % pMovementState.contents.value)
if pMovementState.contents.value == 1:
break
nchecks = nchecks + 1
# print('Current motor position is %d' % self.get_motor_position())
# print('check #5')
# print(nchecks)
# print('')
time.sleep(0.01)
# Now set disabled state
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('check #6')
# print('Disable state ret %s buf %s' % (ret, buf.value))
# print('Final motor position is %d' % (self.get_motor_position()))
# print('check #7')
return ret
"""
Not sure what this is doing yet
"""
def fuck_my_life(self, wavelength):
print('goddamn this piece of shit')
print('')
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
print(b2a)
print(np.sqrt(sqrtarg))
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
print('wavelength_to_pos: {}'.format(wavelength_to_pos))
print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
print('self._offset: {}'.format(int(self._offset)))
"""
Not sure what this is doing yet
"""
def do_get_wavelength(self):
self._offset = self.get_offset()
# self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC
self._currentwl = self._doubleA * (
self.get_motor_position()) ** 2.0 + self._doubleB * self.get_motor_position() + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
return self._currentwl
"""
Not sure what this is doing yet
"""
def do_set_wavelength(self, wavelength):
print('setting wavelength...')
print('')
# print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
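        # The calibration maps motor position p to wavelength: wl = A*p**2 + B*p + C.
        # Solving for p with the quadratic formula gives
        #   p = -B/(2*A) +/- sqrt(B**2/(4*A**2) - (C - wl)/A)
        # b2a and sqrtarg above are those two pieces; the branch below picks the
        # root on the physically meaningful side of the parabola.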
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
# x is what the motor position should be
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
# print('Diff wavelength offset %s' % diff_wavelength_offset)
# Step 5: If HPM is activated and the wavelength position is lower, overshoot
# the movement by 10,000 steps
# print('Step 5...')
# print('#4 Motor current: {}'.format(self.get_motor_current()))
if 1 == 2:
print('uh-oh')
# if self._HPM and diff_wavelength_offset < 0:
#
# print('Overshooting by 10000')
#
# self.set_target_position(diff_wavelength_offset - 10000, False, True)
# # Step 6: Set the real target position
#
# """
# HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID THING THAT'S NOT WORKING!
# """
#
# #print('Step 6a... diff wavelength')
#
# self.set_target_position(10000, False, True)
else:
# print('Step 6b... diff wavelength')
# self.set_target_position(diff_wavelength_offset, False, True)
"""WRONG"""
self.set_target_position(wavelength_to_pos, True, True)
"""this is the real shit right here
I need to set the absolute position to true
"""
# self.set_target_position(10000, False, True)
# Step 7: Get the actual motor position
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print('new offset is %s' % (new_motor_pos-current_motor_pos+self._offset))
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
# Step 8, get and print current wavelength
# print('Current wavelength is %.3f' % self.do_get_wavelength())
# print('setting wavelength done')
return
"""
Not sure what this is doing yet
"""
def is_open(self):
return self._is_open
"""
Not sure what this is doing yet
"""
def clear_fault(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
print('clear fault buf %s, ret %s' % (buf, ret))
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
"""
Not sure what this is doing yet
"""
def initialize(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
BaudRate = DWORD(38400)
Timeout = DWORD(100)
ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf))
# print 'set protocol buf %s ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
# eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
# print 'clear fault buf %s, ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
plsenabled = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf))
# print 'get enable state buf %s ret %s and en %s' % (buf, ret, plsenabled)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
if int(plsenabled.value) != 0:
logging.warning(__name__ + ' EPOS motor enabled, disabling before proceeding.')
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
if int(ret) != 0:
logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding')
else:
logging.error(__name__ + ' EPOS motor was not successfully disabled!')
buf = ctypes.wintypes.DWORD(0)
Counts = WORD(512) # incremental encoder counts in pulses per turn
PositionSensorType = WORD(4)
ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf))
## if ret == int(0):
## print 'errr'
## errbuf = ctypes.create_string_buffer(64)
## print 'sending'
## eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL
## print 'boolerrorinfo'
## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD]
## print 'arg'
##
## ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))
## print 'err'
## raise ValueError(errbuf.value)
# For some reason, it appears normal in the LabVIEW code that this
# function actually returns an error, i.e. the return value is zero
# and the buffer has a non-zero error code in it; the LabVIEW code
# doesn't check it.
# Also, it appears that in the 2005 version of this DLL, the function
# VCS_GetErrorInfo doesn't exist!
# Get operation mode, check if it's 1 -- this is "profile position mode"
buf = ctypes.wintypes.DWORD(0)
pMode = ctypes.pointer(ctypes.c_int8())
eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
# if mode is not 1, make it 1
if pMode.contents.value != 1:
eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
pMode_setting = ctypes.c_int8(1)
ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))
eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value, pProfileDeceleration.contents.value)
if (int(pProfileVelocity.contents.value) > int(11400) or int(pProfileAcceleration.contents.value) > int(
60000) or int(pProfileDeceleration.contents.value) > int(60000)):
            eposlib.VCS_SetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                       ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,
                                                       ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.wintypes.DWORD(429)
pProfileAcceleration = ctypes.wintypes.DWORD(429)
pProfileDeceleration = ctypes.wintypes.DWORD(429)
logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')
ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
# Now get the motor position (stored position offset)
# from the device's "homposition" object
self._offset = self.get_offset()
# Now read the stored 'calculation parameters'
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# More hardcoded values
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(1)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefA = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# Get coefficient B
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(2)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefB = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(3)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefC = CastedObjectData[0]
# Get coefficient D
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefD = CastedObjectData[0]
# print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC, self._coefD)
self._doubleA = self._u32todouble(self._coefA)
self._doubleB = self._u32todouble(self._coefB)
self._doubleC = self._u32todouble(self._coefC)
firstHalf = np.int16(self._coefD >> 16)
secondHalf = np.int16(self._coefD & 0xffff)
# Set the minimum and maximum wavelengths for the motor
self._minwl = float(firstHalf) / 10.0
self._maxwl = float(secondHalf) / 10.0
# print 'first %s second %s' % (firstHalf, secondHalf)
# This returns '10871' and '11859' for the Sacher, which are the correct
# wavelength ranges in Angstroms
# print 'Now calculate the current wavelength position:'
self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
print('initializing done')
return True
"""
Not sure what this is doing yet
"""
"""
Also we're done with the Sacher_EPOS() class at this point
"""
if __name__ == '__main__':
epos = Sacher_EPOS(None, b'COM3')
# epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860)
# epos.do_get_wavelength()
# print('#1 Motor current: {}'.format(epos.get_motor_current()))
# epos.do_get_wavelength()
# print('motor position is...')
# current_pos = epos.get_motor_position()
# print('current position is {}'.format(current_pos))
# new_pos = current_pos + 10000
# epos.set_target_position(new_pos, True, True)
# print(epos.get_motor_position())
# print('#2 Motor current: {}'.format(epos.get_motor_current()))
# epos.find_home()
# epos.restore()
# time.sleep(7)
epos.do_set_wavelength(1151.5)
# epos.do_get_wavelength()
print('Motor current: {}'.format(epos.get_motor_current()))
print('Motor position: {}'.format(epos.get_motor_position()))
"""
OTHER MISC. NOTES:
increasing wavelength:
causes the square to rotate left
causes base to move to the left when square is stuck in
causes screw to loosen
causes large gold base to tighten
decreasing wavelength:
there's an overshoot when lowering wavelength
causes the square to rotate right
causes base to move to the right when square is stuck in
causes screw to tighten
causes large gold base to loosen, and also unplug the motor
Also you don't need to explicitly run epos.initialize() because there's an __init__ function which contains epos.initialize()
"""
# womp the end
| 1.90625 | 2 |
tools/generate_lst.py | haotianliu001/HRNet-Lesion | 0 | 4460 | <filename>tools/generate_lst.py
import argparse
import os
image_dir = 'image'
label_dir = 'label'
splits = ['train', 'val', 'test']
image_dirs = [
'image/{}',
'image/{}_crop'
]
label_dirs = [
'label/{}/annotations',
'label/{}/annotations_crop',
]
def generate(root):
assert len(image_dirs) == len(label_dirs)
for split in splits:
for image_path, label_path in zip(image_dirs, label_dirs):
image_path = image_path.format(split)
label_path = label_path.format(split)
if split != 'train' and image_path.endswith('_crop'):
label_path = label_path.replace('_crop', '')
if not os.path.exists(os.path.join(root, label_path)):
continue
lines = []
for label in os.listdir(os.path.join(root, label_path)):
image = label.replace('.png', '.jpg')
if os.path.exists(os.path.join(root, image_path, image)):
lines.append('{} {}\n'.format(os.path.join(image_path, image), os.path.join(label_path, label)))
else:
print('not found: {}'.format(os.path.join(root, image_path, image)))
print(image_path, label_path, len(lines))
output_file = '{}.lst'.format(image_path.split('/')[1])
with open(os.path.join(root, output_file), 'w') as f:
f.writelines(lines)
print(f'Save to {os.path.join(root, output_file)}\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('root', type=str, help='path of dataset root')
args = parser.parse_args()
generate(args.root)
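# Example invocation (paths are illustrative):
#   python tools/generate_lst.py /data/lesion_dataset
# This writes one .lst file per image directory (e.g. train.lst, train_crop.lst)
# under the dataset root, each line holding "<image path> <label path>".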
| 2.75 | 3 |
examples/example.py | f-dangel/unfoldNd | 21 | 4461 | <gh_stars>10-100
"""How to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``."""
# imports, make this example deterministic
import torch
import unfoldNd
torch.manual_seed(0)
# random batched RGB 32x32 image-shaped input tensor of batch size 64
inputs = torch.randn((64, 3, 32, 32))
# module hyperparameters
kernel_size = 3
dilation = 1
padding = 1
stride = 2
# both modules accept the same arguments and perform the same operation
torch_module = torch.nn.Unfold(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
lib_module = unfoldNd.UnfoldNd(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
# forward pass
torch_outputs = torch_module(inputs)
lib_outputs = lib_module(inputs)
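# With these hyperparameters both outputs have shape (64, 27, 256):
# 27 = 3 channels * 3 * 3 kernel elements, and 256 = 16 * 16 sliding positions,
# where 16 = floor((32 + 2*padding - dilation*(kernel_size - 1) - 1)/stride + 1).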
# check
if torch.allclose(torch_outputs, lib_outputs):
print("✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.")
else:
raise AssertionError("❌ Outputs don't match")
| 2.859375 | 3 |
src/pretix/helpers/escapejson.py | NicsTr/pretix | 1 | 4462 | from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe
_json_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
_json_escapes_attr = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('"'): '"',
ord("'"): ''',
ord("="): '=',
}
@keep_lazy(str, SafeText)
def escapejson(value):
"""Hex encodes characters for use in a application/json type script."""
return mark_safe(force_str(value).translate(_json_escapes))
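# Example (illustrative): escapejson('<b>&</b>') yields the safe string
# \u003Cb\u003E\u0026\u003C/b\u003E, which browsers will not interpret as markup.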
@keep_lazy(str, SafeText)
def escapejson_attr(value):
"""Hex encodes characters for use in a html attributw script."""
return mark_safe(force_str(value).translate(_json_escapes_attr))
| 2.296875 | 2 |
pyxley/charts/plotly/base.py | snowind/pyxley | 2,536 | 4463 |
from ..charts import Chart
from flask import jsonify, request
_BASE_CONFIG = {
"showLink": False,
"displaylogo": False,
"modeBarButtonsToRemove": ["sendDataToCloud"]
}
class PlotlyAPI(Chart):
""" Base class for Plotly.js API
This class is used to create charts using the plotly.js api
To keep this general, this chart does not have a default
method of transmitting data. Instead the user must supply
a route_func method.
"""
def __init__(self, chart_id, url, route_func, init_params={}):
options = {
"chartid": chart_id,
"url": url,
"params": init_params
}
super(PlotlyAPI, self).__init__("PlotlyAPI", options, route_func)
@staticmethod
def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
""" basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
config (dict): config parameters
"""
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
_data.append(
{
"x": df[x].values.tolist(),
"y": df[y].values.tolist(),
"mode": mode
}
)
return {
"data": _data,
"layout": layout,
"config": config
}
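# Minimal usage sketch (the DataFrame and column names are illustrative):
#   import pandas as pd
#   df = pd.DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
#   payload = PlotlyAPI.line_plot(df, [("x", "y")], mode="lines")
#   payload["data"][0]["y"]  -> [2, 4, 6]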
| 3.203125 | 3 |
pyqt/getting_started/close_window.py | CospanDesign/python | 5 | 4464 | <filename>pyqt/getting_started/close_window.py
#!/usr/bin/python
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)
qbtn.resize(qbtn.sizeHint())
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Quit Button')
self.show()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 2.921875 | 3 |
test/means/test_zero_mean.py | bdecost/gpytorch | 0 | 4465 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from gpytorch.means import ZeroMean
class TestZeroMean(unittest.TestCase):
def setUp(self):
self.mean = ZeroMean()
def test_forward(self):
a = torch.Tensor([[1, 2], [2, 4]])
res = self.mean(a)
self.assertEqual(tuple(res.size()), (2,))
self.assertTrue(res.eq(0).all())
def test_forward_batch(self):
a = torch.Tensor([[[1, 2], [1, 2], [2, 4]], [[2, 3], [2, 3], [1, 3]]])
res = self.mean(a)
self.assertEqual(tuple(res.size()), (2, 3))
self.assertTrue(res.eq(0).all())
| 2.40625 | 2 |
generator/contact.py | rizzak/python_training | 0 | 4466 | <filename>generator/contact.py
import jsonpickle
import random
import string
from model.contact import Contact
import os.path
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    print(err)  # report the option-parsing error before exiting
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
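# Example invocation (paths are illustrative):
#   python generator/contact.py -n 10 -f data/contacts.json
# writes one empty contact plus n randomly generated contacts as JSON.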
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Contact(first_name="", middle_name="", last_name="", nickname="", title="", company="", address="",
home_tel="", mobile_tel="", work_tel="", fax="", email="", homepage="", birthday="",
anniversary="", secondary_address="", secondary_tel="", notes="")] + [
Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10),
nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10),
address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10),
work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10),
homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10),
secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10))
    for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file , "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata)) | 2.78125 | 3 |
Lib/test/test_runpy.py | arvindm95/unladen-swallow | 2,293 | 4467 | # Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.test_support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.failUnless(d["result"] == self.expected_result)
self.failUnless(d["__name__"] is None)
self.failUnless(d["__file__"] is None)
self.failUnless(d["__loader__"] is None)
self.failUnless(d["__package__"] is None)
self.failUnless(d["run_argv0"] is saved_argv0)
self.failUnless("run_name" not in d)
self.failUnless(sys.argv[0] is saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.failUnless("result" not in d1)
self.failUnless(d2["initial"] is initial)
self.failUnless(d2["result"] == self.expected_result)
self.failUnless(d2["nested"]["x"] == 1)
self.failUnless(d2["__name__"] is name)
self.failUnless(d2["run_name_in_sys_modules"])
self.failUnless(d2["module_in_sys_modules"])
self.failUnless(d2["__file__"] is file)
self.failUnless(d2["run_argv0"] is file)
self.failUnless(d2["__loader__"] is loader)
self.failUnless(d2["__package__"] is package)
self.failUnless(sys.argv[0] is saved_argv0)
self.failUnless(name not in sys.modules)
class RunModuleTest(unittest.TestCase):
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package
self.expect_import_error("logging")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth):
pkg_name = "__runpy_pkg__"
test_fname = "runpy_test"+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + "runpy_test"
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.failUnless("x" in d1)
self.failUnless(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.failUnless("x" in d2)
self.failUnless(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.failUnless("__package__" in d1)
self.failUnless(d1["__package__"] == pkg_name)
self.failUnless("sibling" in d1)
self.failUnless("nephew" in d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.failUnless("__package__" in d2)
self.failUnless(d2["__package__"] == pkg_name)
self.failUnless("sibling" in d2)
self.failUnless("nephew" in d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
def test_main():
run_unittest(RunModuleCodeTest)
run_unittest(RunModuleTest)
if __name__ == "__main__":
test_main()
| 2.609375 | 3 |
experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py | RedisAI/benchmarks | 6 | 4468 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: imagedata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='imagedata.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0fimagedata.proto\"H\n\tImageData\x12\r\n\x05image\x18\x01 \x01(\x0c\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\r\n\x05\x64type\x18\x04 \x01(\t\"!\n\x0fPredictionClass\x12\x0e\n\x06output\x18\x01 \x03(\x02\x32<\n\tPredictor\x12/\n\rGetPrediction\x12\n.ImageData\x1a\x10.PredictionClass\"\x00\x62\x06proto3')
)
_IMAGEDATA = _descriptor.Descriptor(
name='ImageData',
full_name='ImageData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='ImageData.image', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='ImageData.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='ImageData.width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='ImageData.dtype', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=91,
)
_PREDICTIONCLASS = _descriptor.Descriptor(
name='PredictionClass',
full_name='PredictionClass',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output', full_name='PredictionClass.output', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=126,
)
DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA
DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict(
DESCRIPTOR = _IMAGEDATA,
__module__ = 'imagedata_pb2'
# @@protoc_insertion_point(class_scope:ImageData)
))
_sym_db.RegisterMessage(ImageData)
PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict(
DESCRIPTOR = _PREDICTIONCLASS,
__module__ = 'imagedata_pb2'
# @@protoc_insertion_point(class_scope:PredictionClass)
))
_sym_db.RegisterMessage(PredictionClass)
_PREDICTOR = _descriptor.ServiceDescriptor(
name='Predictor',
full_name='Predictor',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=128,
serialized_end=188,
methods=[
_descriptor.MethodDescriptor(
name='GetPrediction',
full_name='Predictor.GetPrediction',
index=0,
containing_service=None,
input_type=_IMAGEDATA,
output_type=_PREDICTIONCLASS,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PREDICTOR)
DESCRIPTOR.services_by_name['Predictor'] = _PREDICTOR
# @@protoc_insertion_point(module_scope)
| 1.3125 | 1 |
app/api/admin_sales/discounted.py | akashtalole/python-flask-restful-api | 3 | 4469 | <reponame>akashtalole/python-flask-restful-api<filename>app/api/admin_sales/discounted.py
from sqlalchemy import func
from flask_rest_jsonapi import ResourceList
from marshmallow_jsonapi import fields
from marshmallow_jsonapi.flask import Schema
from app.api.helpers.utilities import dasherize
from app.api.bootstrap import api
from app.models import db
from app.models.discount_code import DiscountCode
from app.models.event import Event
from app.models.order import Order, OrderTicket
from app.models.user import User
def sales_per_marketer_and_discount_by_status(status):
return db.session.query(Event.id.label('event_id'),
DiscountCode.id.label('discount_code_id'),
User.id.label('marketer_id'),
func.sum(Order.amount).label(status + '_sales'),
func.sum(OrderTicket.quantity).label(status + '_tickets')) \
.filter(Event.id == Order.event_id) \
.filter(Order.marketer_id == User.id) \
.filter(Order.discount_code_id == DiscountCode.id) \
.filter(Order.status == status) \
.group_by(Event) \
.group_by(DiscountCode) \
.group_by(User) \
.group_by(Order.status) \
.cte()
class AdminSalesDiscountedSchema(Schema):
"""
Discounted sales by event
Provides
Event name,
discount code,
marketer mail,
count of tickets and total sales for orders grouped by status
"""
class Meta:
type_ = 'admin-sales-discounted'
self_view = 'v1.admin_sales_discounted'
inflect = dasherize
id = fields.String()
code = fields.String()
email = fields.String()
event_name = fields.String()
payment_currency = fields.String()
sales = fields.Method('calc_sales')
@staticmethod
def calc_sales(obj):
"""
Returns sales (dictionary with total sales and ticket count) for
placed, completed and pending orders
"""
res = {'placed': {}, 'completed': {}, 'pending': {}}
res['placed']['sales_total'] = obj.placed_sales or 0
res['placed']['ticket_count'] = obj.placed_tickets or 0
res['completed']['sales_total'] = obj.completed_sales or 0
res['completed']['ticket_count'] = obj.completed_tickets or 0
res['pending']['sales_total'] = obj.pending_sales or 0
res['pending']['ticket_count'] = obj.pending_tickets or 0
return res
class AdminSalesDiscountedList(ResourceList):
"""
    Resource for discounted sales. Joins event, marketer, discount code and
    orders, and subsequently accumulates sales by status
"""
def query(self, _):
pending = sales_per_marketer_and_discount_by_status('pending')
completed = sales_per_marketer_and_discount_by_status('completed')
placed = sales_per_marketer_and_discount_by_status('placed')
discounts = self.session.query(Event.id.label('event_id'),
Event.name.label('event_name'),
DiscountCode.id.label('discount_code_id'),
DiscountCode.code.label('code'),
User.id.label('marketer_id'),
User.email.label('email')) \
.filter(Event.id == Order.event_id) \
.filter(Order.marketer_id == User.id) \
.filter(Order.discount_code_id == DiscountCode.id) \
.cte()
return self.session.query(discounts, pending, completed, placed) \
.outerjoin(pending,
(pending.c.event_id == discounts.c.event_id) &
(pending.c.discount_code_id == discounts.c.discount_code_id) &
(pending.c.marketer_id == discounts.c.marketer_id)) \
.outerjoin(completed,
(completed.c.event_id == discounts.c.event_id) &
(completed.c.discount_code_id == discounts.c.discount_code_id) &
(completed.c.marketer_id == discounts.c.marketer_id)) \
.outerjoin(placed,
(placed.c.event_id == discounts.c.event_id) &
(placed.c.discount_code_id == discounts.c.discount_code_id) &
(placed.c.marketer_id == discounts.c.marketer_id))
methods = ['GET']
decorators = (api.has_permission('is_admin'), )
schema = AdminSalesDiscountedSchema
data_layer = {
'model': Event,
'session': db.session,
'methods': {
'query': query
}
}
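
# Routing sketch (illustrative): this resource is expected to be registered on
# the v1 API blueprint elsewhere in the app, along the lines of
#   api.route(AdminSalesDiscountedList, 'admin_sales_discounted', '/admin/sales/discounted')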
| 2.171875 | 2 |
spacy/lang/sr/__init__.py | g4brielvs/spaCy | 4 | 4470 | from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from ...language import Language
class SerbianDefaults(Language.Defaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Serbian(Language):
lang = "sr"
Defaults = SerbianDefaults
__all__ = ["Serbian"]
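
# Minimal usage sketch: the blank pipeline built here only tokenizes, using the
# exceptions, lexical attributes and stop words wired in above.
if __name__ == "__main__":
    nlp = Serbian()
    doc = nlp("Ovo je jedna rečenica.")
    print([token.text for token in doc])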
| 2.203125 | 2 |
mmdet/ops/dcn/__init__.py | TJUsym/TJU_Advanced_CV_Homework | 1,158 | 4471 | <reponame>TJUsym/TJU_Advanced_CV_Homework
from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
DeformConvPack, ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
ModulatedDeformRoIPoolingPack)
__all__ = [
'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling'
]
| 1.71875 | 2 |
api/skill/serializer.py | zaubermaerchen/imas_cg_api | 2 | 4472 | <gh_stars>1-10
# coding: utf-8
from rest_framework import serializers
from data.models import Skill, SkillValue
class ListSerializer(serializers.ModelSerializer):
skill_value_list = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Skill
fields = [
'skill_id',
'target_unit',
'target_member',
'target_type',
'target_num',
'target_param',
'skill_value_id',
'skill_value_list',
'comment'
]
@staticmethod
def get_skill_value_list(obj):
return SkillValue.get_value_list(obj.skill_value_id)
class Costar(object):
def __init__(self, name, count):
self.name = name
self.count = count
class CostarSerializer(serializers.Serializer):
name = serializers.CharField(max_length=255)
count = serializers.IntegerField()
def create(self, validated_data):
return Costar(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.count = validated_data.get('count', instance.count)
return instance
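
# Minimal usage sketch for CostarSerializer (illustrative; assumes a configured
# Django settings module, as elsewhere in this API).
if __name__ == "__main__":
    serializer = CostarSerializer(data={"name": "example costar", "count": 3})
    if serializer.is_valid():
        costar = serializer.save()  # dispatches to create() above
        print(costar.name, costar.count)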
| 2.265625 | 2 |
Codes/Converting_RGB_to_GreyScale.py | sichkar-valentyn/Image_processing_in_Python | 3 | 4473 | # File: Converting_RGB_to_GreyScale.py
# Description: Opening RGB image as array, converting to GreyScale and saving result into new file
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 <NAME>
# github.com/sichkar-valentyn
#
# Reference to:
# <NAME>. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603
# Opening RGB image as array, converting to GreyScale and saving result into new file
# Importing needed libraries
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage import color
from skimage import io
import scipy.misc
# Creating an array from image data
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
# Checking the type of the array
print(type(image_np)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_np.shape)
# Showing image with every channel separately
channel_R = image_np[:, :, 0]
channel_G = image_np[:, :, 1]
channel_B = image_np[:, :, 2]
# Creating a figure with subplots
f, ax = plt.subplots(nrows=2, ncols=2)
# ax is (2, 2) np array and to make it easier to read we use 'flatten' function
# Or we can call each time ax[0, 0]
ax0, ax1, ax2, ax3 = ax.flatten()
# Adjusting first subplot
ax0.imshow(channel_R, cmap='Reds')
ax0.set_xlabel('')
ax0.set_ylabel('')
ax0.set_title('Red channel')
# Adjusting second subplot
ax1.imshow(channel_G, cmap='Greens')
ax1.set_xlabel('')
ax1.set_ylabel('')
ax1.set_title('Green channel')
# Adjusting third subplot
ax2.imshow(channel_B, cmap='Blues')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax2.set_title('Blue channel')
# Adjusting fourth subplot
ax3.imshow(image_np)
ax3.set_xlabel('')
ax3.set_ylabel('')
ax3.set_title('Original image')
# Function to make distance between figures
plt.tight_layout()
# Giving the name to the window with figure
f.canvas.set_window_title('Eagle image in three channels R, G and B')
# Showing the plots
plt.show()
# Converting RGB image into GrayScale image
# Using formula:
# Y' = 0.299 R + 0.587 G + 0.114 B
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :, 2] * 0.114
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Preparing array for saving - creating three channels with the same data in each
# Firstly, creating array with zero elements
# And by 'image_GreyScale.shape + tuple([3])' we add one more element '3' to the tuple
# Now the shape will be (1080, 1920, 3) - which is tuple type
image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3]))
# Secondly, reshaping GreyScale image from 2D to 3D
x = image_GreyScale.reshape((1080, 1920, 1))
# Finally, writing all data in three channels
image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0]
# Saving image into a file from obtained 3D array
scipy.misc.imsave("images/result_1.jpg", image_GreyScale_with_3_channels)
# Checking that image was written with three channels and they are identical
result_1 = Image.open("images/result_1.jpg")
result_1_np = np.array(result_1)
print(result_1_np.shape)
print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1]))
print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2]))
# Showing saved resulted image
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Here we don't need to specify the map like cmap='Greys'
plt.imshow(result_1_np)
plt.show()
# Another way to convert RGB image into GreyScale image
image_RGB = io.imread("images/eagle.jpg")
image_GreyScale = color.rgb2gray(image_RGB)
# Checking the type of the array
print(type(image_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_2.jpg", image_GreyScale)
# One more way for converting
image_RGB_as_GreyScale = io.imread("images/eagle.jpg", as_gray=True)
# Checking the type of the array
print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_RGB_as_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_RGB_as_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_3.jpg", image_RGB_as_GreyScale)
| 3.859375 | 4 |
template_renderer.py | hamza-gheggad/gcp-iam-collector | 0 | 4474 | <filename>template_renderer.py<gh_stars>0
import colorsys
import json
from jinja2 import Environment, PackageLoader
import graph
def create_html(formatted_nodes, formatted_edges, role_color_map, output_name):
env = Environment(loader=PackageLoader('visualisation', '.'))
template = env.get_template('visualisation.template')
default_filters = list(graph.type_properties.keys())
all_roles=list(role_color_map.keys())
print(all_roles)
html = template.render(formatted_nodes=formatted_nodes,
formatted_edges=formatted_edges,
type_properties=graph.type_properties,
default_filters=default_filters,
all_roles=all_roles)
with open(output_name, "w+") as resource_file:
resource_file.write(html)
def get_description(node):
    desc = node.get_type_name() + "<br/>"
    if node.title:
        desc = desc + node.title + "<br/>"
    if node.properties:
        for k, v in node.properties.items():
            desc = desc + k + ": " + str(v) + "<br/>"
return desc
def render(nodes, edges, output_name):
color_map = roles_to_color_map(edges=edges)
formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map)
create_html(formatted_nodes, formatted_edges, color_map, output_name)
def color_for_role(role, all_roles):
hue = float(all_roles.index(role)) / len(all_roles)
    return '#%02x%02x%02x' % tuple(int(c * 255) for c in colorsys.hsv_to_rgb(hue, 1, 0.85))
def sanitise_role(role):
return str(role).replace('roles/', '') \
.lower() \
.replace('writer', 'editor') \
.replace('reader', 'viewer')
def roles_to_color_map(edges):
all_roles = list({sanitise_role(e.role) for e in edges if e.role})
role_map = {}
for role in all_roles:
role_map[role] = color_for_role(role, all_roles)
role_map['other'] = '#00c0ff'
return role_map
def format_graph(nodes, edges, role_color_map):
nodes_list = []
node_ids = {}
for counter, node in enumerate(nodes):
node_ids[node.id] = counter
value = {
'id': counter,
'shape': 'icon',
'label': node.name,
'type': node.node_type,
'icon': {
'face': 'Font Awesome 5 Free',
'code': node.get_font_code(),
'size': node.get_size(),
'color': node.get_color(),
'weight': 'bold'
}
}
description = get_description(node)
if description:
value['title'] = description
nodes_list.append(json.dumps(value).replace("\\\\", "\\"))
edges_list = []
for edge in edges:
value = {
'from': node_ids[edge.node_from.id],
'to': node_ids[edge.node_to.id],
'arrows': 'to',
}
if edge.label:
value['label'] = edge.label
if edge.title:
value['title'] = edge.title
value['role'] = sanitise_role(edge.role) if edge.role else 'other'
value['color'] = role_color_map[value['role']]
edges_list.append(json.dumps(value))
return nodes_list, edges_list | 2.59375 | 3 |
powerapi/cli/tools.py | danglotb/powerapi | 0 | 4475 | <gh_stars>0
# Copyright (c) 2018, INRIA
# Copyright (c) 2018, University of Lille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import logging
from functools import reduce
from powerapi.exception import PowerAPIException
from powerapi.cli.parser import MainParser, ComponentSubParser
from powerapi.cli.parser import store_true
from powerapi.cli.parser import BadValueException, MissingValueException
from powerapi.cli.parser import BadTypeException, BadContextException
from powerapi.cli.parser import UnknowArgException
from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel
from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB
from powerapi.puller import PullerActor
from powerapi.pusher import PusherActor
def enable_log(arg, val, args, acc):
acc[arg] = logging.DEBUG
return args, acc
def check_csv_files(files):
return reduce(lambda acc, f: acc and os.access(f, os.R_OK), files.split(','), True)
def extract_file_names(arg, val, args, acc):
acc[arg] = val.split(',')
return args, acc
class CommonCLIParser(MainParser):
def __init__(self):
MainParser.__init__(self)
self.add_argument('v', 'verbose', flag=True, action=enable_log, default=logging.NOTSET,
help='enable verbose mode')
self.add_argument('s', 'stream', flag=True, action=store_true, default=False, help='enable stream mode')
subparser_mongo_input = ComponentSubParser('mongodb')
        subparser_mongo_input.add_argument('u', 'uri', help='specify MongoDB uri')
subparser_mongo_input.add_argument('d', 'db', help='specify MongoDB database name', )
subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection')
subparser_mongo_input.add_argument('n', 'name', help='specify puller name', default='puller_mongodb')
        subparser_mongo_input.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='HWPCReport')
self.add_component_subparser('input', subparser_mongo_input,
                                     help_str='specify a database input : --input database_name ARG1 ARG2 ... ')
subparser_csv_input = ComponentSubParser('csv')
subparser_csv_input.add_argument('f', 'files',
help='specify input csv files with this format : file1,file2,file3',
action=extract_file_names, default=[], check=check_csv_files,
check_msg='one or more csv files couldn\'t be read')
        subparser_csv_input.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='HWPCReport')
subparser_csv_input.add_argument('n', 'name', help='specify puller name', default='puller_csv')
self.add_component_subparser('input', subparser_csv_input,
                                     help_str='specify a database input : --input database_name ARG1 ARG2 ... ')
subparser_mongo_output = ComponentSubParser('mongodb')
        subparser_mongo_output.add_argument('u', 'uri', help='specify MongoDB uri')
subparser_mongo_output.add_argument('d', 'db', help='specify MongoDB database name')
subparser_mongo_output.add_argument('c', 'collection', help='specify MongoDB database collection')
        subparser_mongo_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_mongo_output.add_argument('n', 'name', help='specify pusher name', default='pusher_mongodb')
self.add_component_subparser('output', subparser_mongo_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ...')
subparser_csv_output = ComponentSubParser('csv')
subparser_csv_output.add_argument('d', 'directory',
help='specify directory where where output csv files will be writen')
        subparser_csv_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_csv_output.add_argument('n', 'name', help='specify pusher name', default='pusher_csv')
self.add_component_subparser('output', subparser_csv_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ... ')
subparser_influx_output = ComponentSubParser('influxdb')
        subparser_influx_output.add_argument('u', 'uri', help='specify InfluxDB uri')
subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name')
subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection port', type=int)
        subparser_influx_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_influx_output.add_argument('n', 'name', help='specify pusher name', default='pusher_influxdb')
self.add_component_subparser('output', subparser_influx_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ... ')
subparser_opentsdb_output = ComponentSubParser('opentsdb')
        subparser_opentsdb_output.add_argument('u', 'uri', help='specify openTSDB host')
subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int)
subparser_opentsdb_output.add_argument('metric_name', help='specify metric name')
        subparser_opentsdb_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
default='PowerReport')
        subparser_opentsdb_output.add_argument('n', 'name', help='specify pusher name', default='pusher_opentsdb')
self.add_component_subparser('output', subparser_opentsdb_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ... ')
def parse_argv(self):
try:
return self.parse(sys.argv[1:])
except BadValueException as exn:
msg = 'CLI error : argument ' + exn.argument_name + ' : ' + exn.msg
print(msg, file=sys.stderr)
except MissingValueException as exn:
msg = 'CLI error : argument ' + exn.argument_name + ' : expect a value'
print(msg, file=sys.stderr)
except BadTypeException as exn:
msg = 'CLI error : argument ' + exn.argument_name + ' : expect '
msg += exn.article + ' ' + exn.type_name
print(msg, file=sys.stderr)
except UnknowArgException as exn:
            msg = 'CLI error : unknown argument ' + exn.argument_name
print(msg, file=sys.stderr)
except BadContextException as exn:
msg = 'CLI error : argument ' + exn.argument_name
msg += ' not used in the correct context\nUse it with the following arguments :'
for main_arg_name, context_name in exn.context_list:
msg += '\n --' + main_arg_name + ' ' + context_name
print(msg, file=sys.stderr)
sys.exit()
class Generator:
def __init__(self, component_group_name):
self.component_group_name = component_group_name
def generate(self, config):
if self.component_group_name not in config:
print('CLI error : no ' + self.component_group_name + ' specified', file=sys.stderr)
sys.exit()
actors = {}
for component_type, components_list in config[self.component_group_name].items():
for component_name, component_config in components_list.items():
try:
actors[component_name] = self._gen_actor(component_type, component_config, config)
except KeyError as exn:
msg = 'CLI error : argument ' + exn.args[0]
msg += ' needed with --output ' + component_type
print(msg, file=sys.stderr)
sys.exit()
return actors
def _gen_actor(self, component_name, component_config, main_config):
raise NotImplementedError()
class ModelNameAlreadyUsed(PowerAPIException):
"""
Exception raised when attempting to add to a DBActorGenerator a model factory with a name already bound to another
model factory in the DBActorGenerator
"""
class DatabaseNameAlreadyUsed(PowerAPIException):
"""
Exception raised when attempting to add to a DBActorGenerator a database factory with a name already bound to another
database factory in the DBActorGenerator
"""
class DBActorGenerator(Generator):
def __init__(self, component_group_name):
Generator.__init__(self, component_group_name)
self.model_factory = {
'HWPCReport': HWPCModel(),
'PowerReport': PowerModel(),
'FormulaReport': FormulaModel(),
'ControlReport': ControlModel(),
}
self.db_factory = {
'mongodb': lambda db_config: MongoDB(db_config['uri'], db_config['db'], db_config['collection']),
'csv': lambda db_config: CsvDB(current_path=os.getcwd() if 'directory' not in db_config else db_config['directory'],
files=[] if 'files' not in db_config else db_config['files']),
'influxdb': lambda db_config: InfluxDB(db_config['uri'], db_config['port'], db_config['db']),
'opentsdb': lambda db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']),
}
def add_model_factory(self, model_name, model_factory):
if model_name in self.model_factory:
raise ModelNameAlreadyUsed()
self.model_factory[model_name] = model_factory
def add_db_factory(self, db_name, db_factory):
        if db_name in self.db_factory:
            raise DatabaseNameAlreadyUsed()
        self.db_factory[db_name] = db_factory
def _generate_db(self, db_name, db_config, main_config):
return self.db_factory[db_name](db_config)
def _gen_actor(self, db_name, db_config, main_config):
db = self._generate_db(db_name, db_config, main_config)
model = self.model_factory[db_config['model']]
name = db_config['name']
return self._actor_factory(name, db, model, main_config['stream'], main_config['verbose'])
def _actor_factory(self, name, db, model, stream_mode, level_logger):
raise NotImplementedError()
class PullerGenerator(DBActorGenerator):
def __init__(self, report_filter):
DBActorGenerator.__init__(self, 'input')
self.report_filter = report_filter
def _actor_factory(self, name, db, model, stream_mode, level_logger):
return PullerActor(name, db, self.report_filter, model, stream_mode, level_logger)
class PusherGenerator(DBActorGenerator):
def __init__(self):
DBActorGenerator.__init__(self, 'output')
def _actor_factory(self, name, db, model, stream_mode, level_logger):
return PusherActor(name, model, db, level_logger)
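
# Illustrative wiring sketch: parse the command line and build puller/pusher
# actors from the resulting configuration. `report_filter` stands in for a real
# powerapi report filter, which depends on the formula being run.
def _example_setup(report_filter):
    config = CommonCLIParser().parse_argv()
    pullers = PullerGenerator(report_filter).generate(config)
    pushers = PusherGenerator().generate(config)
    return pullers, pushers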
| 1.195313 | 1 |
pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py | PyXRD/pyxrd | 27 | 4476 | # coding=UTF-8
# ex:ts=4:sw=4:et=on
#
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
from mvc.models.properties import StringProperty
from pyxrd.generic.io.custom_io import storables, Storable
from pyxrd.generic.models.base import DataModel
from pyxrd.refinement.refinables.mixins import RefinementGroup
@storables.register()
class InSituBehaviour(DataModel, RefinementGroup, Storable):
"""
Interface class for coding in-situ behaviour scripts.
Sub-classes should override or implement the methods below.
"""
# MODEL INTEL:
class Meta(DataModel.Meta):
store_id = "InSituBehaviour" # Override this so it is a unique string
concrete = False # Indicates this cannot be instantiated and added in the UI
mixture = property(DataModel.parent.fget, DataModel.parent.fset)
# REFINEMENT GROUP IMPLEMENTATION:
@property
def refine_title(self):
return "In-situ behaviour"
@property
def refine_descriptor_data(self):
return dict(
phase_name=self.phase.refine_title,
component_name="*"
)
#: The name of this Behaviour
name = StringProperty(
default="New Behaviour", text="Name",
visible=True, persistent=True, tabular=True
)
# ------------------------------------------------------------
# Initialization and other internals
# ------------------------------------------------------------
def __init__(self, *args, **kwargs):
my_kwargs = self.pop_kwargs(kwargs,
*[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()]
)
super(InSituBehaviour, self).__init__(*args, **kwargs)
kwargs = my_kwargs
with self.data_changed.hold():
self.name = self.get_kwarg(kwargs, self.name, "name")
pass #end of constructor
# ------------------------------------------------------------
# Methods & Functions
# ------------------------------------------------------------
def apply(self, phase):
assert phase is not None, "Cannot apply on None"
assert self.is_compatible_with(phase), "`%r` is not compatible with phase `%r`" % (self, phase)
def is_compatible_with(self, phase):
return False # sub classes need to override this
pass #end of class | 1.914063 | 2 |
1 plainProgrammingBug/start 1 plainProgrammingBug.py | vishalbelsare/SLAPP3 | 8 | 4477 | <reponame>vishalbelsare/SLAPP3<filename>1 plainProgrammingBug/start 1 plainProgrammingBug.py
# start 1 plainProgrammingBug.py
import random
def SimpleBug():
# the environment
worldXSize = 80
worldYSize = 80
# the bug
xPos = 40
yPos = 40
# the action
for i in range(100):
xPos += randomMove()
yPos += randomMove()
xPos = (xPos + worldXSize) % worldXSize
yPos = (yPos + worldYSize) % worldYSize
print ("I moved to X = ", xPos, " Y = ", yPos)
# returns -1, 0, 1 with equal probability
def randomMove():
return random.randint(-1, 1)
SimpleBug()
"""
you can eliminate the randomMove() function substituting
xPos += randomMove()
yPos += randomMove()
with
xPos += random.randint(-1, 1)
yPos += random.randint(-1, 1)
but the use of the function allows us to use here a self-explanatory
name
"""
| 3.359375 | 3 |
ba5a-min-coins/money_change.py | kjco/bioinformatics-algorithms | 0 | 4478 |
money = 8074
#money = 18705
#coin_list = [24,23,21,5,3,1]
coin_list = [24,13,12,7,5,3,1]
#coin_list = map(int, open('dataset_71_8.txt').read().split(','))
d = {0:0}
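# Bottom-up dynamic programming: d[m] holds the minimum number of coins needed
# to make change for amount m, with d[0] = 0 and
# d[m] = min over coins c <= m of (d[m - c] + 1).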
for m in range(1,money+1):
min_coin = 1000000
for coin in coin_list:
if m >= coin:
if d[m-coin]+1 < min_coin:
min_coin = d[m-coin]+1
d[m] = min_coin
#print d
print d[money]
| 2.9375 | 3 |
examples/remove_comments.py | igordejanovic/textx-bibtex | 1 | 4479 | """
Remove comments from bib file.
"""
from textx import metamodel_for_language
from txbibtex import bibentry_str
BIB_FILE = 'references.bib'
bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE)
# Drop line comments.
print('\n'.join([bibentry_str(e) for e in bibfile.entries
if e.__class__.__name__ != 'BibLineComment']))
| 2.8125 | 3 |
google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py | bopopescu/Social-Lite | 0 | 4480 | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create resource policy command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils as compute_api
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.resource_policies import flags
from googlecloudsdk.command_lib.compute.resource_policies import util
def _CommonArgs(parser, api_version):
"""A helper function to build args based on different API version."""
messages = apis.GetMessagesModule('compute', api_version)
flags.MakeResourcePolicyArg().AddArgument(parser)
flags.AddCommonArgs(parser)
flags.AddGroupPlacementArgs(parser, messages)
parser.display_info.AddCacheUpdater(None)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateGroupPlacement(base.CreateCommand):
"""Create a Google Compute Engine Group Placement Resource Policy."""
@staticmethod
def Args(parser):
_CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
messages = holder.client.messages
resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages)
create_request = messages.ComputeResourcePoliciesInsertRequest(
resourcePolicy=resource_policy,
project=policy_ref.project,
region=policy_ref.region)
service = holder.client.apitools_client.resourcePolicies
return client.MakeRequests([(service, 'Insert', create_request)])[0]
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateGroupPlacementBeta(CreateGroupPlacement):
"""Create a Google Compute Engine Group Placement Resource Policy."""
@staticmethod
def Args(parser):
_CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION)
CreateGroupPlacement.detailed_help = {
'DESCRIPTION':
"""\
Create a Google Compute Engine Group Placement Resource Policy.
""",
'EXAMPLES':
"""\
To create a Google Compute Engine Group Placement Resource policy with 2 VMs and 2 availability domains, run:
$ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2
"""
}
| 1.835938 | 2 |
paperoni/io.py | notoraptor/paperoni | 88 | 4481 | <gh_stars>10-100
import json
from .papers import Papers
from .researchers import Researchers
def ResearchersFile(filename):
"""Parse a file containing researchers."""
try:
with open(filename, "r") as file:
data = json.load(file)
except FileNotFoundError:
data = {}
return Researchers(data, filename=filename)
def PapersFile(filename, researchers=None):
"""Parse a file containing papers."""
try:
with open(filename, "r") as file:
data = json.load(file)
except FileNotFoundError:
data = {}
return Papers(data, filename=filename, researchers=researchers)
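
# Minimal usage sketch (file names are placeholders; a missing file simply
# yields an empty collection, as handled above).
if __name__ == "__main__":
    researchers = ResearchersFile("researchers.json")
    papers = PapersFile("papers.json", researchers=researchers)
    print(papers)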
| 3.0625 | 3 |
src/lib/sd2/test_addresses.py | zachkont/sd2 | 0 | 4482 | <reponame>zachkont/sd2
#############################################################################
# Copyright (c) 2017 SiteWare Corp. All right reserved
#############################################################################
import logging
import pytest
from . import addresses
def test_pytest():
assert True
def test_object_exists():
assert addresses.cidr_db
def test_new_address():
address = addresses.cidr_db.get_address_for_host('test_test_foo')
assert address
assert address >= addresses.cidr_db.first_address()
assert address <= addresses.cidr_db.last_address()
addresses.cidr_db.reload()
assert addresses.cidr_db.get_address_for_host('test_test_foo') == address
assert addresses.cidr_db.has('test_test_foo')
addresses.cidr_db.forget('test_test_foo')
assert not addresses.cidr_db.has('test_test_foo')
addresses.cidr_db.reload()
assert not addresses.cidr_db.has('test_test_foo')
| 2.15625 | 2 |
config_model.py | Asha-ai/BERT_abstractive_proj | 17 | 4483 | <filename>config_model.py
import texar.tf as tx
beam_width = 5
hidden_dim = 768
bert = {
'pretrained_model_name': 'bert-base-uncased'
}
# See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams
bert_encoder = {}
# From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45
# with adjustments for BERT
decoder = {
'dim': hidden_dim,
'num_blocks': 6,
'multihead_attention': {
'num_heads': 8,
'output_dim': hidden_dim
},
'initializer': {
'type': 'variance_scaling_initializer',
'kwargs': {
'scale': 1.0,
'mode': 'fan_avg',
'distribution': 'uniform',
},
},
'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim)
}
loss_label_confidence = 0.9
opt = {
'optimizer': {
'type': 'AdamOptimizer',
'kwargs': {
'beta1': 0.9,
'beta2': 0.997,
'epsilon': 1e-9
}
}
}
lr = {
# The 'learning_rate_schedule' can have the following 3 values:
# - 'static' -> A simple static learning rate, specified by 'static_lr'
# - 'aiayn' -> The learning rate used in the "Attention is all you need" paper.
# - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer example
'learning_rate_schedule': 'aiayn',
# The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
'lr_constant': 2 * (hidden_dim ** -0.5),
# The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate
'warmup_steps': 4000,
# The static learning rate, when 'static' is used.
'static_lr': 1e-3,
# A multiplier that can be applied to the 'aiayn' learning rate.
'aiayn_multiplier': 0.2
}
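
# Illustrative only: the 'aiayn' option above refers to the schedule from
# "Attention is all you need"; scaled by 'aiayn_multiplier', one step of that
# rule can be sketched as follows.
def example_aiayn_lr(step):
    step = max(step, 1)
    return lr['aiayn_multiplier'] * (hidden_dim ** -0.5) * min(
        step ** -0.5, step * lr['warmup_steps'] ** -1.5)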
| 2.3125 | 2 |
wishes/migrations/0005_auto_20201029_0904.py | e-elson/bd | 0 | 4484 | # Generated by Django 3.1.2 on 2020-10-29 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wishes', '0004_auto_20201029_0857'),
]
operations = [
migrations.AlterField(
model_name='gallery',
name='image',
field=models.FilePathField(path='/images'),
),
]
| 1.40625 | 1 |
undeployed/legacy/Landsat/DNtoReflectance.py | NASA-DEVELOP/dnppy | 65 | 4485 | #-------------------------------------------------------------------------------
# Name: Landsat Digital Numbers to Radiance/Reflectance
# Purpose: To convert landsat 4,5, or 7 pixel values from digital numbers
# to Radiance, Reflectance, or Temperature
# Author: <NAME> <EMAIL>
# NASA DEVELOP Program
# Created: 19/10/2012
#-------------------------------------------------------------------------------
import arcpy
import math
arcpy.CheckOutExtension("Spatial")
def DNtoReflectance(Lbands,MetaData,OutputType="Reflectance/Temperature",Save=False,OutputFolder=""):
"""This function is used to convert Landsat 4,5, or 7 pixel values from
digital numbers to Radiance, Reflectance, or Temperature (if using Band 6)
-----Inputs------
Lbands: GeoTIFF files containing individual bands of Landsat imagery. These
must have the original names as downloaded and must be from a single scene.
MetaData: The metadata text file that is downloaded with the Landsat Bands themselves.
This may be either the old or new MTL.txt file.
OutputType: Choose whether the output should be:
"Radiance"
"Reflectance/Temperature" - Calculates Reflectance for spectral bands
and Temperature in Kelvin for Thermal bands
    Save: Boolean value that indicates whether the output rasters will be saved permanently
Each band will be saved as an individual GeoTIFF file and be named
                    according to the original filename and the output pixel unit
*if this is true, then the OutputFolder variable must also be set
OutputFolder: Folder in which to save the output rasters
-----Outputs-----
A list of arcpy raster objects in a sequence that mirrors that of the input Lbands
"""
OutList=[]
#These lists will be used to parse the meta data text file and locate relevant information
#metadata format was changed August 29, 2012. This tool can process either the new or old format
newMeta=['LANDSAT_SCENE_ID = "','DATE_ACQUIRED = ',"SUN_ELEVATION = ",
"RADIANCE_MAXIMUM_BAND_{0} = ","RADIANCE_MINIMUM_BAND_{0} = ",
"QUANTIZE_CAL_MAX_BAND_{0} = ","QUANTIZE_CAL_MIN_BAND_{0} = "]
oldMeta=['BAND1_FILE_NAME = "',"ACQUISITION_DATE = ","SUN_ELEVATION = ",
"LMAX_BAND{0} = ","LMIN_BAND{0} = ",
"QCALMAX_BAND{0} = ","QCALMIN_BAND{0} = "]
f=open(MetaData)
MText=f.read()
#the presence of a PRODUCT_CREATION_TIME category is used to identify old metadata
#if this is not present, the meta data is considered new.
#Band6length refers to the length of the Band 6 name string. In the new metadata this string is longer
if "PRODUCT_CREATION_TIME" in MText:
Meta=oldMeta
Band6length=2
else:
Meta=newMeta
Band6length=8
    #The tile name is located using the newMeta/oldMeta indices and the date of capture is recorded
if Meta==newMeta:
TileName=MText.split(Meta[0])[1].split('"')[0]
year=TileName[9:13]
jday=TileName[13:16]
elif Meta==oldMeta:
TileName=MText.split(Meta[0])[1].split('"')[0]
year=TileName[13:17]
jday=TileName[17:20]
date=MText.split(Meta[1])[1].split('\n')[0]
#the spacecraft from which the imagery was capture is identified
#this info determines the solar exoatmospheric irradiance (ESun) for each band
spacecraft=MText.split('SPACECRAFT_ID = "')[1].split('"')[0]
ThermBands=["6"]
if "7" in spacecraft:
ESun=(1969.0,1840.0,1551.0,1044.0,255.700,0. ,82.07,1368.00)
ThermBands=["B6_VCID_1","B6_VCID_2"]
elif "5" in spacecraft: ESun=(1957.0,1826.0,1554.0,1036.0,215.0 ,0. ,80.67)
elif "4" in spacecraft: ESun=(1957.0,1825.0,1557.0,1033.0,214.9 ,0. ,80.72)
elif "8" in spacecraft:
ESun=(1857.0,1996.0,1812.0,1516.0,983.3 ,251.8,85.24,0.0,389.3,0.,0.)
ThermBands=["10","11"]
else:
arcpy.AddError("This tool only works for Landsat 4, 5, 7 or 8 ")
raise arcpy.ExecuteError()
    #determining if the year is a leap year and setting the Days in year accordingly
if float(year) % 4 ==0: DIY=366.
else:DIY=365.
    #using the date to determine the distance from the sun
theta =2*math.pi*float(jday)/DIY
dSun2 = (1.00011 + 0.034221*math.cos(theta) + 0.001280*math.sin(theta) +
0.000719*math.cos(2*theta)+ 0.000077*math.sin(2*theta) )
SZA=90.-float(MText.split(Meta[2])[1].split("\n")[0])
#Calculating values for each band
for pathname in Lbands:
try:
BandNum=pathname.split("\\")[-1].split("B")[1][0:2]
try: int(BandNum)
except: BandNum=pathname.split("\\")[-1].split("B")[1][0]
except:
msg="Error reading Band {0}. Bands must have original names as downloaded.".format(str(inputbandnum))
arcpy.AddError(msg)
print msg
raise arcpy.ExecuteError
#changing Band 6 name to match metadata
if BandNum=="6" and spacecraft[8]=="7":
BandNum=pathname.split("\\")[-1].split("B")[1][0:Band6length]
print "Processing Band {0}".format(BandNum)
Oraster=arcpy.Raster(pathname)
        #using the oldMeta/newMeta indices to pull the min/max for radiance/Digital numbers
LMax= float(MText.split(Meta[3].format(BandNum))[1].split("\n")[0])
LMin= float(MText.split(Meta[4].format(BandNum))[1].split("\n")[0])
QCalMax=float(MText.split(Meta[5].format(BandNum))[1].split("\n")[0])
QCalMin=float(MText.split(Meta[6].format(BandNum))[1].split("\n")[0])
Radraster=(((LMax - LMin)/(QCalMax-QCalMin)) * (Oraster - QCalMin)) +LMin
Oraster=0
if OutputType=="Radiance":
Radraster.save("{0}\\{1}_B{2}_Radiance.tif".format(OutputFolder,TileName,BandNum))
Radraster=0
elif OutputType=="Reflectance/Temperature":
#Calculating temperature for band 6 if present
if BandNum in ThermBands:
Refraster=1282.71/(arcpy.sa.Ln((666.09/Radraster)+1.0))
BandPath="{0}\\{1}_B{2}_Temperature.tif".format(OutputFolder,TileName,BandNum)
arcpy.AddMessage("Proceeded through if")
#Otherwise calculate reflectance
else:
Refraster=( math.pi * Radraster * dSun2) / (ESun[int(BandNum[0])-1] * math.cos(SZA*math.pi/180) )
BandPath="{0}\\{1}_B{2}_TOA_Reflectance.tif".format(OutputFolder,TileName,BandNum)
arcpy.AddMessage("Proceeded through else")
if Save==True:
Refraster.save(BandPath)
OutList.append(arcpy.Raster(BandPath))
else:
OutList.append(Refraster)
del Refraster,Radraster
arcpy.AddMessage( "Reflectance Calculated for Band {0}".format(BandNum))
print "Reflectance Calculated for Band {0}".format(BandNum)
f.close()
return OutList
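
# Example call (illustrative; the paths are placeholders and ArcGIS's arcpy
# must be available):
#
#   bands = [r"C:\data\LT50270392011158PAC01_B{0}.TIF".format(n) for n in range(1, 8)]
#   meta = r"C:\data\LT50270392011158PAC01_MTL.txt"
#   rasters = DNtoReflectance(bands, meta, OutputType="Reflectance/Temperature",
#                             Save=True, OutputFolder=r"C:\data\output")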
| 3.203125 | 3 |
.modules/.theHarvester/discovery/twittersearch.py | termux-one/EasY_HaCk | 1,103 | 4486 | import string
import requests
import sys
import myparser
import re
class search_twitter:
def __init__(self, word, limit):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = "www.google.com"
self.hostname = "www.google.com"
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7"
self.quantity = "100"
self.limit = int(limit)
self.counter = 0
def do_search(self):
try:
urly="https://"+ self.server + "/search?num=100&start=" + str(self.counter) + "&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20" + self.word
except Exception, e:
print e
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}
try:
r=requests.get(urly,headers=headers)
except Exception,e:
print e
self.results = r.content
self.totalresults += self.results
def get_people(self):
rawres = myparser.parser(self.totalresults, self.word)
return rawres.people_twitter()
def process(self):
while (self.counter < self.limit):
self.do_search()
self.counter += 100
print "\tSearching " + str(self.counter) + " results.."
| 3.046875 | 3 |
scrap_instagram.py | genaforvena/nn_scrapper | 0 | 4487 | import urllib.request
import json
access_token = "<KEY>"
api_url = "https://api.instagram.com/v1"
nn_lat = 56.296504
nn_lng = 43.936059
def request(endpoint, req_params = ""):
req = api_url + endpoint + "?access_token=" + access_token + "&" + req_params
print(req)
raw_response = urllib.request.urlopen(req).read()
return json.loads(raw_response.decode('utf8'))
locations = request("/locations/search", "lat=" + str(nn_lat) + "&lng=" + str(nn_lng))["data"]
print(locations)
for location in locations:
location_id = location["id"]
location_media = request("/locations/" + str(location_id) + "/media/recent")
print(location_media)
| 3.34375 | 3 |
tests/unit/utils/test_validators.py | kajusK/HiddenPlaces | 0 | 4488 | <reponame>kajusK/HiddenPlaces
"""Unit tests for app.validators. """
from wtforms import ValidationError
import flask
from pytest import raises
from app.utils.validators import password_rules, image_file, allowed_file
class DummyField(object):
"""Dummy field object to emulate wtforms field."""
def __init__(self, data=None, errors=(), raw_data=None):
self.data = data
self.errors = list(errors)
self.raw_data = raw_data
def gettext(self, string):
return string
def ngettext(self, singular, plural, n):
return singular
class DummyForm(dict):
"""Dummy form object to emulate wtforms form."""
pass
class DummyFile(object):
"""Dummy file like class to emulate uploaded file handler."""
def __init__(self, filename):
self.filename = filename
def __repr__(self):
return self.filename
def _run_validator_check(subtests, validator, valid, invalid):
"""Runs tests again validator with valid and invalid inputs.
Args:
subtest: Subtests fixture.
validator: Validator instance to run tests against
valid: List of valid inputs
invalid: List of invalid inputs
"""
field = DummyField()
for item in valid:
field.data = item
with subtests.test(item=item):
validator(DummyForm(), field)
for item in invalid:
field.data = item
with subtests.test(item=item):
with raises(ValidationError):
validator(DummyForm(), field)
def test_allowed_file(subtests, req_context):
validator = allowed_file()
extensions = ['exe', 'html']
valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo']
invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html']
valid = [DummyFile(x) for x in valid]
invalid = [DummyFile(x) for x in invalid]
flask.current_app.config['DISABLED_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_allowed_file_multiple(subtests, req_context):
validator = allowed_file()
extensions = ['exe', 'html']
valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo']
invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html']
valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])],
[DummyFile(valid[0]), DummyFile(valid[1])]]
invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])],
[DummyFile(invalid[0]), DummyFile(invalid[1])]]
flask.current_app.config['DISABLED_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_allowed_file_message(req_context):
validator = allowed_file(message="custom message")
field = DummyField()
field.data = DummyFile("blah.foo")
flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo']
with flask.current_app.test_request_context():
with raises(ValidationError) as e:
validator(DummyForm(), field)
assert str(e.value) == "custom message"
def test_image_file(subtests, req_context):
validator = image_file()
extensions = ['jpg', 'png', 'tiff']
valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg']
invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif']
valid = [DummyFile(x) for x in valid]
invalid = [DummyFile(x) for x in invalid]
flask.current_app.config['IMAGE_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_image_file_multiple(subtests, req_context):
validator = image_file()
extensions = ['jpg', 'png', 'tiff']
valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg']
invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif']
valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])],
[DummyFile(valid[0]), DummyFile(valid[1])]]
invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])],
[DummyFile(invalid[0]), DummyFile(invalid[1])]]
flask.current_app.config['IMAGE_EXTENSIONS'] = extensions
with flask.current_app.test_request_context():
_run_validator_check(subtests, validator, valid, invalid)
def test_image_file_message(req_context):
validator = image_file(message="custom message")
field = DummyField()
field.data = DummyFile("blah")
flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo']
with flask.current_app.test_request_context():
with raises(ValidationError) as e:
validator(DummyForm(), field)
assert str(e.value) == "custom message"
def test_password_rules_length(subtests):
validator = password_rules(length=6, upper=None, lower=None, numeric=None,
special=None)
valid = ["as123.21", "abcdef", "sdadadaswasasa", "1234567", "...,.,..,",
"AAAAAAA", "AbCdEf"]
invalid = ["abc", "123", "....", "aBcDe", "a1.V3"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_upper(subtests):
validator = password_rules(length=6, upper=2, lower=None, numeric=None,
special=None)
valid = ["abcDEf", "HellOO", "ABCDEZ", "A.b#3CZ", "ADSDSA"]
invalid = ["abcdEf", "helloo", "A231sdsd"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_lower(subtests):
validator = password_rules(length=6, upper=None, lower=3, numeric=None,
special=None)
valid = ["abcdefg", "axzBAR", "123abcdsa", "AbCdEfGh", "..as..2ds.."]
invalid = ["foOBAR", "123ABcdSA", "1a2b.C#"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_numeric(subtests):
validator = password_rules(length=6, upper=None, lower=None, numeric=2,
special=None)
valid = ["1bcd4A.d", "123456", "a?9#.0"]
invalid = ["2ds.#<", "abcdef", "ABCDEF", "x2U.'Q"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_special(subtests):
validator = password_rules(length=6, upper=None, lower=None, numeric=None,
special=3)
valid = ["ab.?123!", ".#@dS9", "abcdef123><?"]
invalid = ["abcdef", ".23134", "AbCd123,]"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_all(subtests):
validator = password_rules(length=6, upper=2, lower=1, numeric=1,
special=1)
valid = ["ABc1.2", "abcDEF123#%^", "a2B.C?"]
invalid = ["helloo", "ABCDEF", "Ab1.?c"]
_run_validator_check(subtests, validator, valid, invalid)
def test_password_rules_message(subtests):
validator = password_rules(length=100, message="custom message")
field = DummyField()
field.data = "wrong"
with raises(ValidationError) as e:
validator(DummyForm(), field)
assert str(e.value) == "custom message"
| 2.828125 | 3 |
ts_eval/utils/nans.py | vshulyak/ts-eval | 1 | 4489 | <filename>ts_eval/utils/nans.py<gh_stars>1-10
import warnings
import numpy as np
def nans_in_same_positions(*arrays):
"""
Compares all provided arrays to see if they have NaNs in the same positions.
"""
if len(arrays) == 0:
return True
for arr in arrays[1:]:
if not (np.isnan(arrays[0]) == np.isnan(arr)).all():
return False
return True
def nanmeanw(arr, axis=None):
"""
Computes nanmean without raising a warning in case of NaNs in the dataset
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
return np.nanmean(arr, axis=axis)
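
if __name__ == "__main__":
    # Small usage example of the helpers above.
    a = np.array([1.0, np.nan, 3.0])
    b = np.array([4.0, np.nan, 6.0])
    print(nans_in_same_positions(a, b))  # True: NaNs are in matching positions
    print(nanmeanw(a))                   # 2.0, mean of the non-NaN values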
| 2.8125 | 3 |
tests/authorization/test_searches.py | UOC/dlkit | 2 | 4490 | <gh_stars>1-10
"""Unit tests of authorization searches."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def authorization_search_class_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'AUTHORIZATION',
proxy=PROXY,
implementation=request.cls.service_config)
create_form = request.cls.svc_mgr.get_vault_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
def class_tear_down():
request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def authorization_search_test_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.search = request.cls.catalog.get_authorization_search()
@pytest.mark.usefixtures("authorization_search_class_fixture", "authorization_search_test_fixture")
class TestAuthorizationSearch(object):
"""Tests for AuthorizationSearch"""
@pytest.mark.skip('unimplemented test')
def test_search_among_authorizations(self):
"""Tests search_among_authorizations"""
pass
@pytest.mark.skip('unimplemented test')
def test_order_authorization_results(self):
"""Tests order_authorization_results"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_search_record(self):
"""Tests get_authorization_search_record"""
pass
@pytest.mark.usefixtures("authorization_search_results_class_fixture", "authorization_search_results_test_fixture")
class TestAuthorizationSearchResults(object):
"""Tests for AuthorizationSearchResults"""
@pytest.mark.skip('unimplemented test')
def test_get_authorizations(self):
"""Tests get_authorizations"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_query_inspector(self):
"""Tests get_authorization_query_inspector"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_search_results_record(self):
"""Tests get_authorization_search_results_record"""
pass
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def vault_search_class_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'AUTHORIZATION',
proxy=PROXY,
implementation=request.cls.service_config)
create_form = request.cls.svc_mgr.get_vault_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
def class_tear_down():
request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def vault_search_test_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.search = request.cls.catalog.get_vault_search()
@pytest.mark.usefixtures("vault_search_class_fixture", "vault_search_test_fixture")
class TestVaultSearch(object):
"""Tests for VaultSearch"""
@pytest.mark.skip('unimplemented test')
def test_search_among_vaults(self):
"""Tests search_among_vaults"""
pass
@pytest.mark.skip('unimplemented test')
def test_order_vault_results(self):
"""Tests order_vault_results"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_search_record(self):
"""Tests get_vault_search_record"""
pass
@pytest.mark.usefixtures("vault_search_results_class_fixture", "vault_search_results_test_fixture")
class TestVaultSearchResults(object):
"""Tests for VaultSearchResults"""
@pytest.mark.skip('unimplemented test')
def test_get_vaults(self):
"""Tests get_vaults"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_query_inspector(self):
"""Tests get_vault_query_inspector"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_search_results_record(self):
"""Tests get_vault_search_results_record"""
pass
| 2.171875 | 2 |
mechroutines/models/_flux.py | keceli/mechdriver | 1 | 4491 | <filename>mechroutines/models/_flux.py
"""
NEW: Handle flux files
"""
import autofile
def read_flux(ts_save_path, vrc_locs=(0,)):
""" Read the geometry from the filesys
"""
vrc_fs = autofile.fs.vrctst(ts_save_path)
if vrc_fs[-1].file.flux.exists(vrc_locs):
flux_str = vrc_fs[-1].file.flux.read(vrc_locs)
else:
flux_str = None
return flux_str
| 2.578125 | 3 |
RandomForest/RandomForest.py | nachiket273/ML_Algo_Implemented | 7 | 4492 | import math
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import sys
import os
sys.path.append(os.path.abspath('../DecisionTree'))
from DecisionTree import DecisionTree
class RandomForest(BaseEstimator):
"""
Simple implementation of Random Forest.
This class has implementation for Random Forest classifier and regressor.
Dataset bagging is done by simple numpy random choice with replacement.
For classification the prediction is by majority vote.
    For regression trees the prediction is the average of all estimator predictions.
Args:
n_estimators Number of base estimators (Decision Trees here)
max_features Maximum features to be used to construct tree.
Default:
- If classifier, default is square root of total
features.
- If regressor, default is total number of features.
        max_depth           The maximum depth to which estimators need to be constructed.
Default: np.inf
        min_samples_split   Minimum number of samples that need to be present for a split at the
                            node.
Default: 2
criterion criterion to be used for split.
For classification tree following criterion are supported:
- gini
- entropy
For regression tree following criterion are supported:
- mse (mean squared error)
- mae (mean absolute error)
Default: gini
random_seed random seed value for numpy operations.
Default: 0
"""
def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2,
criterion='gini', random_seed=0):
self.n_estimators = n_estimators
self.max_features = max_features
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.criterion = criterion
self.random_seed = random_seed
self.idxs = []
self.trees = []
for i in range(self.n_estimators):
self.trees.append(DecisionTree(max_depth= self.max_depth,
min_samples_split=self.min_samples_split,
max_features = self.max_features,
criterion=self.criterion,
random_seed = self.random_seed))
self.is_classification_forest = False
if self.criterion == 'gini' or self.criterion == 'entropy':
self.is_classification_forest = True
elif self.criterion == 'mse' or self.criterion == 'mae':
self.is_classification_forest = False
else:
raise Exception("Invalid criterion: {}".format(self.criterion))
    def get_subsets(self, X, y, num=1):
        # Bootstrap sampling: draw `num` subsets, each sampled with
        # replacement and of the same size as the original dataset.
        subsets = []
        y = np.asarray(y)
        if len(np.shape(y)) == 1:
            y = np.expand_dims(y, axis=1)
        num_samples = X.shape[0]
        rng = np.random.default_rng(seed=self.random_seed)
        for _ in range(num):
            idx = rng.choice(num_samples, size=num_samples, replace=True)
            subsets.append([X[idx], y[idx]])
        return subsets
def fit(self, X, y):
np.random.seed(self.random_seed)
if isinstance(X, pd.DataFrame):
X = X.to_numpy()
subsets = self.get_subsets(X, y, self.n_estimators)
if self.max_features == 0:
if self.is_classification_forest:
self.max_features = int(math.sqrt(X.shape[1]))
else:
self.max_features = int(X.shape[1])
        # Feature bagging - each estimator considers at most `max_features`
        # features (by default the square root of the feature count for
        # classification, all features for regression).
for i, _ in enumerate(self.trees):
self.trees[i].max_features = self.max_features
X_sub, y_sub = subsets[i]
self.trees[i].fit(X_sub, y_sub)
def predict(self, X):
all_preds = np.empty((X.shape[0], self.n_estimators))
for i, tree in enumerate(self.trees):
preds = tree.predict(X)
all_preds[:, i] = preds
y_preds = []
for preds in all_preds:
if self.is_classification_forest:
y_preds.append(np.bincount(preds.astype('int')).argmax())
else:
y_preds.append(np.average(preds))
return y_preds | 3.546875 | 4 |
tests/basics/generator_pend_throw.py | iotctl/pycopy | 663 | 4493 | def gen():
i = 0
while 1:
yield i
i += 1
g = gen()
try:
g.pend_throw
except AttributeError:
print("SKIP")
raise SystemExit
print(next(g))
print(next(g))
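# pend_throw() marks the exception to be raised inside the generator at its
# next resumption instead of raising it immediately (a MicroPython extension).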
g.pend_throw(ValueError())
v = None
try:
v = next(g)
except Exception as e:
print("raised", repr(e))
print("ret was:", v)
# It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g = gen()
g.pend_throw(ValueError())
try:
next(g)
except ValueError:
print("ValueError from just-started gen")
| 3.03125 | 3 |
src/UnitTypes/ProjectileModule.py | USArmyResearchLab/ARL_Battlespace | 1 | 4494 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 09:49:47 2020
@author: james.z.hare
"""
from src.UnitModule import UnitClass, advance
from copy import deepcopy
import math
class ProjectileClass(UnitClass):
"""
The Projectile Class
This is a subclass to the UnitClass
Virtual Functions
-----------------
- `__copy__()` to make shallow copies
- `__deepcopy__(memo)` to make deep copies
- `possibleActions(State)` to identify legal actions
- `observe(Unit)` to observe units located within VisibleRange
- `overlaps(Unit)` to identify if the unit overlaps with another unit
- `execute(Action, State)` to execute the action
Attributes
----------
ID:
a unique identifier of this unit
Owner:
the player the unit belongs to
Health:
the health of the unit
Extent:
the space occupied by unit
Position:
location of unit
Orientation:
as the name says
VisibleRange:
how far the unit can observe
Actions: dict
        dictionary of actions common across all units
ActionOptions:
list of list of action options.
Attack:
int that defines whether the unit is attacking in an advance action
    RemainingLifetime:
int that defines the total number of turns until the unit is dead
"""
def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf):
UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1))
self.Actions = { "advance": lambda x: advance(self, x) }
self.ActionOptions = ( ( "advance", ), )
self.Attack = None
self.RemainingLifetime = RemainingLifetime
def __copy__(self):
Duplicate = ProjectileClass(self.ID, self.Owner, self.Health)
Duplicate.Position = self.Position
Duplicate.Orientation = self.Orientation
Duplicate.Attack = self.Attack
Duplicate.RemainingLifetime = self.RemainingLifetime
return Duplicate
def __deepcopy__(self, memo):
Default = None
Exists = memo.get(self, Default)
if Exists is not Default:
return Exists
Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo))
Duplicate.Position = deepcopy(self.Position, memo)
Duplicate.Orientation = deepcopy(self.Orientation, memo)
Duplicate.Attack = deepcopy(self.Attack, memo)
Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo)
memo[self] = Duplicate
return Duplicate
def possibleActions(self, State):
"""
Identifies the set of feasible actions given the board size and position of the unit
Parameters
----------
State: StateClass
Returns
-------
        ActionOptions: tuple
            The tuple of feasible action options for this unit
"""
return self.ActionOptions
def observe(self, Unit):
if Unit.ID == self.ID:
return Unit
return None
def overlaps(self, Unit):
MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for y in range(self.Extent[1]) ])
#print(Unit)
TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ])
return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0
def execute(self, Actions, State):
"""
Execute `Actions` on `State`.
Parameters
----------
Actions : list[str]
A set of actions to be performed on `State`.
State : StateClass
State on which to inflict actions.
Returns
-------
Changes : list
Resulting state of executed `Actions`.
"""
NewState = deepcopy(State)
Changes = []
for Action in Actions:
ActionResult = self.Actions[Action](NewState)
ActionResult[1].RemainingLifetime -= 1
if isinstance(ActionResult, list):
Changes += ActionResult
else:
Changes.append(ActionResult)
return Changes
# Will be used as the projectile for the missile launcher unit
class MissileClass(ProjectileClass):
    def __init__(self, ID, Owner, Position, Life=1):
        # ProjectileClass takes no Position/Life parameters; map Life onto
        # RemainingLifetime, use a nominal Health of 1, and set the position
        # directly on the instance.
        ProjectileClass.__init__(self, ID, Owner, Health=1, RemainingLifetime=Life)
        self.Position = Position | 3.375 | 3 |
OOP_MiniQuiz/run_car_Level2.py | HelloYeew/helloyeew-lab-computer-programming-i | 0 | 4495 | from car import *
def compare(car1,car2):
print(car1)
print(car2)
car1 = Car("Nissan","Tiida",450000)
car2 = Car("Toyota","Vios",400000)
car3 = Car("BMW","X3",3400000)
compare(car3,car1)
compare(car1,car2) | 3.109375 | 3 |
prelude/monads.py | michel-slm/python-prelude | 2 | 4496 | from abc import ABCMeta, abstractmethod
from prelude.typeclasses import Monad
from prelude.decorators import monad_eq, singleton
@monad_eq
class Either(Monad):
__metaclass__ = ABCMeta
@classmethod
def mreturn(cls, val):
return Right(val)
@abstractmethod
def __iter__(self):
pass
class Left(Either):
def __init__(self, val):
self.__val = val
def __rshift__(self, f):
return self
def __iter__(self):
return iter([])
def __eq__(self, other):
return type(self) == type(other)
def __repr__(self):
return "Left({})".format(self.__val)
class Right(Either):
def __init__(self, val):
self.__val = val
def __rshift__(self, f):
return f(self.__val)
def __iter__(self):
yield self.__val
def __repr__(self):
return "Right({})".format(self.__val)
class Maybe(Monad):
__metaclass__ = ABCMeta
@classmethod
def mreturn(cls, val):
return Just(val)
@abstractmethod
def __iter__(self):
pass
@monad_eq
class Just(Maybe):
def __init__(self, val):
self.__val = val
def __rshift__(self, f):
return f(self.__val)
def __iter__(self):
yield self.__val
def __repr__(self):
return "Just({})".format(self.__val)
@singleton
class Nothing(Maybe):
def __rshift__(self, f):
return self
def __iter__(self):
return iter([])
def __repr__(self):
return "Nothing()"
| 2.90625 | 3 |
Deep Sort/src/imgconverter.py | JJavier98/TFG-Dron-de-Vigilancia | 0 | 4497 | <gh_stars>0
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('msgs_to_cv2')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/bebop/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
cv2.imshow("hola", cv_image)
cv2.waitKey(3)
def main(args):
    # Create the converter once, register the node, and hand control to ROS;
    # re-creating the subscriber and re-initialising the node in a loop would
    # raise on the second iteration.
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| 2.78125 | 3 |
foodx_devops_tools/azure/__init__.py | Food-X-Technologies/foodx_devops_tools | 3 | 4498 | <reponame>Food-X-Technologies/foodx_devops_tools<filename>foodx_devops_tools/azure/__init__.py
# Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Azure related utilities."""
| 0.921875 | 1 |
beartype/vale/__init__.py | posita/beartype | 0 | 4499 | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype validators.**
This submodule publishes a PEP-compliant hierarchy of subscriptable (indexable)
classes enabling callers to validate the internal structure of arbitrarily
complex scalars, data structures, and third-party objects. Like annotation
objects defined by the :mod:`typing` module (e.g., :attr:`typing.Union`), these
classes dynamically generate PEP-compliant type hints when subscripted
(indexed) and are thus intended to annotate callables and variables. Unlike
annotation objects defined by the :mod:`typing` module, these classes are *not*
explicitly covered by existing PEPs and thus *not* directly usable as
annotations.
Instead, callers are expected to (in order):
#. Annotate callable parameters and returns to be validated with
:pep:`593`-compliant :attr:`typing.Annotated` type hints.
#. Subscript those hints with (in order):
#. The type of those parameters and returns.
#. One or more subscriptions of classes declared by this submodule.
'''
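# ....................{ EXAMPLE }....................
# Illustrative usage sketch (comments only, nothing executed at import time):
# validators are embedded in PEP 593 "typing.Annotated" hints and enforced by
# the @beartype decorator.
#
#     from beartype import beartype
#     from beartype.vale import Is
#     from typing import Annotated
#
#     NonEmptyString = Annotated[str, Is[lambda text: bool(text)]]
#
#     @beartype
#     def munge_it(text: NonEmptyString) -> str:
#         return text + '!'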
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.vale._is._valeis import _IsFactory
from beartype.vale._is._valeistype import (
_IsInstanceFactory,
_IsSubclassFactory,
)
from beartype.vale._is._valeisobj import _IsAttrFactory
from beartype.vale._is._valeisoper import _IsEqualFactory
# ....................{ SINGLETONS }....................
# Public factory singletons instantiating these private factory classes.
Is = _IsFactory(basename='Is')
IsAttr = _IsAttrFactory(basename='IsAttr')
IsEqual = _IsEqualFactory(basename='IsEqual')
IsInstance = _IsInstanceFactory(basename='IsInstance')
IsSubclass = _IsSubclassFactory(basename='IsSubclass')
# Delete all private factory classes imported above for safety.
del (
_IsFactory,
_IsAttrFactory,
_IsEqualFactory,
_IsInstanceFactory,
_IsSubclassFactory,
)
# ....................{ TODO }....................
#FIXME: As intelligently requested by @Saphyel at #32, add support for
#additional classes support constraints resembling:
#
#* String constraints:
# * Email.
# * Uuid.
# * Choice.
# * Language.
# * Locale.
# * Country.
# * Currency.
#* Comparison constraints
# * IdenticalTo.
# * NotIdenticalTo.
# * LessThan.
# * GreaterThan.
# * Range.
# * DivisibleBy.
#FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same
#signature and docstring as the existing CauseSleuth.get_cause_or_none()
#method. This new BeartypeValidator.get_cause_or_none() method should then be
#called by the "_peperrorannotated" submodule to generate human-readable
#exception messages. Note that this implies that:
#* The BeartypeValidator.__init__() method will need to additionally accept a new
# mandatory "get_cause_or_none: Callable[[], Optional[str]]" parameter, which
# that method should then localize to "self.get_cause_or_none".
#* Each __class_getitem__() dunder method of each "_BeartypeValidatorFactoryABC" subclass will need
# to additionally define and pass that callable when creating and returning
# its "BeartypeValidator" instance.
#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we can
#leverage all of our existing "beartype.is" infrastructure to dynamically
#synthesize PEP-compliant type hints that would then be implicitly supported by
#any runtime type checker. At present, subscriptions of "Is" (e.g.,
#"Annotated[str, Is[lambda text: bool(text)]]") are only supported by beartype
#itself. Of course, does anyone care? I mean, if you're using a runtime type
#checker, you're probably *ONLY* using beartype. Right? That said, this would
#technically improve portability by allowing users to switch between different
#checkers... except not really, since they'd still have to import beartype
#infrastructure to do so. So, this is probably actually useless.
#
#Nonetheless, the idea itself is trivial. We declare a new
#"beartype.is.Portable" singleton accessed in the same way: e.g.,
# from beartype import beartype
# from beartype.is import Portable
# NonEmptyStringTest = Is[lambda text: bool(text)]
# NonEmptyString = Portable[str, NonEmptyStringTest]
# @beartype
# def munge_it(text: NonEmptyString) -> str: ...
#
#So what's the difference between "typing.Annotated" and "beartype.is.Portable"
#then? Simple. The latter dynamically generates one new PEP 3119-compliant
#metaclass and associated class whenever subscripted. Clearly, this gets
#expensive in both space and time consumption fast -- which is why this won't
#be the default approach. For safety, this new class does *NOT* subclass the
#first subscripted class. Instead:
#* This new metaclass of this new class simply defines an __isinstancecheck__()
# dunder method. For the above example, this would be:
# class NonEmptyStringMetaclass(object):
# def __isinstancecheck__(cls, obj) -> bool:
# return isinstance(obj, str) and NonEmptyStringTest(obj)
#* This new class would then be entirely empty. For the above example, this
# would be:
# class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass):
# pass
#
#Well, so much for brilliant. It's slow and big, so it seems doubtful anyone
#would actually do that. Nonetheless, that's food for thought for you.
| 2.09375 | 2 |