repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-840k chars) |
---|---|---|---|---|
aliharby12/Simple-vezeeta-project | src/users/migrations/0014_auto_20200801_1008.py | feb6df8b354ac284edc645059bea17021169dcfa | # Generated by Django 2.2 on 2020-08-01 08:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0013_auto_20200731_1810'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='Specialist_doctor',
field=models.CharField(choices=[('جراحه اطفال', 'جراحه اططفال'), ('تخسيس', 'تخسيس'), ('عظام', 'عظام'), ('جراحه عامه', 'جراحه عامه'), ('اطفال', 'اطفال'), ('اورام', 'اورام'), ('مخ واعصاب', 'مخ واعصاب'), ('انف واذن', 'انف واذن'), ('امراض دم', 'امراض دم'), ('باطنة', 'باطنه'), ('اسنان', 'اسنان'), ('جراحه تجميل', 'جراحه تجميل'), ('حميات', 'حميات'), ('نسا وتوليد', 'نسا وتوليد')], default='باطنه', max_length=255, verbose_name='التخصص'),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='اسم صاحب التعليق')),
('email', models.EmailField(max_length=254, verbose_name='البريد الالكتروني')),
('body', models.TextField(verbose_name='محتوى التعليق')),
('comment_date', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='users.Profile')),
],
),
]
| [((17, 18, 17, 643), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((22, 23, 22, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((23, 25, 23, 104), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((24, 26, 24, 109), 'django.db.models.EmailField', 'models.EmailField', (), '', False, 'from django.db import migrations, models\n'), ((25, 25, 25, 83), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((26, 33, 26, 72), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((27, 27, 27, 61), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((28, 25, 28, 132), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
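For context, the CreateModel operation above corresponds to a model definition roughly like the following (a hypothetical reconstruction from the migration fields; the project's actual models.py may differ):

from django.db import models

class Comment(models.Model):
    # Field names, options, and verbose names taken directly from the migration above.
    name = models.CharField(max_length=255, verbose_name='اسم صاحب التعليق')
    email = models.EmailField(max_length=254, verbose_name='البريد الالكتروني')
    body = models.TextField(verbose_name='محتوى التعليق')
    comment_date = models.DateTimeField(auto_now_add=True)
    active = models.BooleanField(default=False)
    user = models.ForeignKey('users.Profile', on_delete=models.CASCADE, related_name='comments')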
tienthegainz/pipeline_executor_docker_call | caller_v3/app/api/v1/docker.py | b2b9478056e4b818f5963b0b266375fe6d39627a | from typing import Any, List, Callable
from fastapi import APIRouter, HTTPException, status, BackgroundTasks
from app import schemas
from app.core import docker_client
import json
from copy import deepcopy
router = APIRouter()
@router.get("/images", response_model=schemas.DockerImageRespond)
def get_docker_image() -> Any:
images_list = docker_client.images.list(all=True)
return {
'images': [{'id': image.short_id, 'tags': image.tags} for image in images_list if image.tags]
}
@router.get("/volumes", response_model=schemas.DockerVolumeRespond)
def get_docker_volume() -> Any:
volumes_list = docker_client.volumes.list()
return {
'volumes': [{'id': volume.short_id, 'name': volume.name} for volume in volumes_list]
}
| [((11, 9, 11, 20), 'fastapi.APIRouter', 'APIRouter', ({}, {}), '()', False, 'from fastapi import APIRouter, HTTPException, status, BackgroundTasks\n'), ((16, 18, 16, 53), 'app.core.docker_client.images.list', 'docker_client.images.list', (), '', False, 'from app.core import docker_client\n'), ((23, 19, 23, 47), 'app.core.docker_client.volumes.list', 'docker_client.volumes.list', ({}, {}), '()', False, 'from app.core import docker_client\n')] |
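The handlers above return plain dictionaries that FastAPI validates against `schemas.DockerImageRespond` and `schemas.DockerVolumeRespond`. Inferred from those return shapes, the schemas would look roughly like the following Pydantic models (an assumption-based sketch; the project's actual app/schemas module may differ):

from typing import List
from pydantic import BaseModel

class DockerImage(BaseModel):
    id: str
    tags: List[str]

class DockerImageRespond(BaseModel):
    images: List[DockerImage]

class DockerVolume(BaseModel):
    id: str
    name: str

class DockerVolumeRespond(BaseModel):
    volumes: List[DockerVolume]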
kalyc/keras-apache-mxnet | keras/models.py | 5497ebd50a45ccc446b8944ebbe11fb7721a5533 | """Model-related utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import backend as K
from .utils.generic_utils import has_arg
from .utils.generic_utils import to_list
from .engine.input_layer import Input
from .engine.input_layer import InputLayer
from .engine.training import Model
from .engine.sequential import Sequential
from .engine.saving import save_model
from .engine.saving import load_model
from .engine.saving import model_from_config
from .engine.saving import model_from_yaml
from .engine.saving import model_from_json
from .engine.saving import save_mxnet_model
try:
import h5py
except ImportError:
h5py = None
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
    of the original model, on top of new input tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(batch_shape=layer.batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for _original, _cloned in zip(model._input_layers, input_layers):
layer_map[_original] = _cloned
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
input_tensors = to_list(input_tensors)
_input_tensors = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + name)
_input_tensors.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
_input_tensors.append(x)
input_tensors = _input_tensors
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = (y, None) # tensor, mask
# Iterated over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# Gather inputs to call the new layer.
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_data = [] # List of tuples (input, mask).
for x in reference_input_tensors:
if x in tensor_map:
computed_data.append(tensor_map[x])
if len(computed_data) == len(reference_input_tensors):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_data) == 1:
computed_tensor, computed_mask = computed_data[0]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_mask
output_tensors = to_list(
layer(computed_tensor, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensor,
computed_mask))
computed_tensors = [computed_tensor]
computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_masks
output_tensors = to_list(
layer(computed_tensors, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensors,
computed_masks))
# Update tensor_map.
for x, y, mask in zip(reference_output_tensors,
output_tensors,
output_masks):
tensor_map[x] = (y, mask)
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
tensor, _ = tensor_map[x]
output_tensors.append(tensor)
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Sequential` reproducing the behavior
    of the original model, on top of new input tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
x = to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers,
name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
    of the original model, on top of new input tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
| [] |
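A minimal usage sketch for `clone_model` as defined above: the clone shares the architecture but gets freshly initialized weights, so compile state must be re-created and weights copied over explicitly if needed (assumes a standard Keras installation exposing these symbols):

from keras.models import Sequential, clone_model
from keras.layers import Dense

model = Sequential([Dense(8, activation='relu', input_shape=(4,)), Dense(1)])
clone = clone_model(model)                  # same architecture, new weights
clone.set_weights(model.get_weights())      # optionally copy weights over
clone.compile(optimizer='sgd', loss='mse')  # compile settings are not cloned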
erinxocon/vscode-python | pythonFiles/tests/testing_tools/adapter/test_functional.py | e53f9061d16467a9ae2d8995a9a5f3cfa0f444e1 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import unicode_literals
import json
import os
import os.path
import subprocess
import sys
import unittest
import pytest
from ...__main__ import TESTING_TOOLS_ROOT
CWD = os.getcwd()
DATA_DIR = os.path.join(os.path.dirname(__file__), '.data')
SCRIPT = os.path.join(TESTING_TOOLS_ROOT, 'run_adapter.py')
def resolve_testroot(name):
projroot = os.path.join(DATA_DIR, name)
return projroot, os.path.join(projroot, 'tests')
def run_adapter(cmd, tool, *cliargs):
try:
return _run_adapter(cmd, tool, *cliargs)
except subprocess.CalledProcessError:
# Re-run pytest but print out stdout & stderr this time
try:
return _run_adapter(cmd, tool, *cliargs, hidestdio=False)
except subprocess.CalledProcessError as exc:
print(exc.output)
def _run_adapter(cmd, tool, *cliargs, **kwargs):
hidestdio = kwargs.pop('hidestdio', True)
assert not kwargs
kwds = {}
argv = [sys.executable, SCRIPT, cmd, tool, '--'] + list(cliargs)
if not hidestdio:
argv.insert(4, '--no-hide-stdio')
kwds['stderr'] = subprocess.STDOUT
argv.append('--cache-clear')
print('running {!r}'.format(' '.join(arg.rpartition(CWD + '/')[-1] for arg in argv)))
return subprocess.check_output(argv,
universal_newlines=True,
**kwds)
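# Illustrative note (not part of the original file): a call such as
#   run_adapter('discover', 'pytest', '--rootdir', '/path/to/proj', '/path/to/proj/tests')
# makes _run_adapter assemble roughly the following command:
#   [sys.executable, SCRIPT, 'discover', 'pytest', '--',
#    '--rootdir', '/path/to/proj', '/path/to/proj/tests', '--cache-clear']
# with '--no-hide-stdio' inserted before the '--' separator when hidestdio is False.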
def fix_path(nodeid):
return nodeid.replace('/', os.path.sep)
def fix_test_order(tests):
if sys.version_info >= (3, 6):
return tests
fixed = []
curfile = None
group = []
for test in tests:
if (curfile or '???') not in test['id']:
fixed.extend(sorted(group, key=lambda t: t['id']))
group = []
curfile = test['id'].partition('.py::')[0] + '.py'
group.append(test)
fixed.extend(sorted(group, key=lambda t: t['id']))
return fixed
def fix_source(tests, testid, srcfile, lineno):
testid = fix_path(testid)
for test in tests:
if test['id'] == testid:
break
else:
raise KeyError('test {!r} not found'.format(testid))
if not srcfile:
srcfile = test['source'].rpartition(':')[0]
test['source'] = fix_path('{}:{}'.format(srcfile, lineno))
@pytest.mark.functional
class PytestTests(unittest.TestCase):
def complex(self, testroot):
results = COMPLEX.copy()
results['root'] = testroot
return [results]
def test_discover_simple(self):
projroot, testroot = resolve_testroot('simple')
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
testroot)
result = json.loads(out)
self.maxDiff = None
self.assertEqual(result, [{
'root': projroot,
'rootid': '.',
'parents': [
{'id': fix_path('./tests'),
'kind': 'folder',
'name': 'tests',
'parentid': '.',
},
{'id': fix_path('./tests/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests'),
},
],
'tests': [
{'id': fix_path('./tests/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_spam.py'),
},
],
}])
def test_discover_complex_default(self):
projroot, testroot = resolve_testroot('complex')
expected = self.complex(projroot)
expected[0]['tests'] = fix_test_order(expected[0]['tests'])
if sys.version_info < (3,):
decorated = [
'./tests/test_unittest.py::MyTests::test_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_not_skipped',
]
for testid in decorated:
fix_source(expected[0]['tests'], testid, None, 0)
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
testroot)
result = json.loads(out)
result[0]['tests'] = fix_test_order(result[0]['tests'])
self.maxDiff = None
self.assertEqual(result, expected)
def test_discover_complex_doctest(self):
projroot, _ = resolve_testroot('complex')
expected = self.complex(projroot)
# add in doctests from test suite
expected[0]['parents'].insert(3, {
'id': fix_path('./tests/test_doctest.py'),
'kind': 'file',
'name': 'test_doctest.py',
'parentid': fix_path('./tests'),
})
expected[0]['tests'].insert(2, {
'id': fix_path('./tests/test_doctest.py::tests.test_doctest'),
'name': 'tests.test_doctest',
'source': fix_path('./tests/test_doctest.py:1'),
'markers': [],
'parentid': fix_path('./tests/test_doctest.py'),
})
# add in doctests from non-test module
expected[0]['parents'].insert(0, {
'id': fix_path('./mod.py'),
'kind': 'file',
'name': 'mod.py',
'parentid': '.',
})
expected[0]['tests'] = [
{'id': fix_path('./mod.py::mod'),
'name': 'mod',
'source': fix_path('./mod.py:1'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
{'id': fix_path('./mod.py::mod.Spam'),
'name': 'mod.Spam',
'source': fix_path('./mod.py:33'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
{'id': fix_path('./mod.py::mod.Spam.eggs'),
'name': 'mod.Spam.eggs',
'source': fix_path('./mod.py:43'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
{'id': fix_path('./mod.py::mod.square'),
'name': 'mod.square',
'source': fix_path('./mod.py:18'),
'markers': [],
'parentid': fix_path('./mod.py'),
},
] + expected[0]['tests']
expected[0]['tests'] = fix_test_order(expected[0]['tests'])
if sys.version_info < (3,):
decorated = [
'./tests/test_unittest.py::MyTests::test_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_skipped',
'./tests/test_unittest.py::MyTests::test_maybe_not_skipped',
]
for testid in decorated:
fix_source(expected[0]['tests'], testid, None, 0)
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
'--doctest-modules',
projroot)
result = json.loads(out)
result[0]['tests'] = fix_test_order(result[0]['tests'])
self.maxDiff = None
self.assertEqual(result, expected)
def test_discover_not_found(self):
projroot, testroot = resolve_testroot('notests')
out = run_adapter('discover', 'pytest',
'--rootdir', projroot,
testroot)
result = json.loads(out)
self.maxDiff = None
self.assertEqual(result, [])
# TODO: Expect the following instead?
#self.assertEqual(result, [{
# 'root': projroot,
# 'rootid': '.',
# 'parents': [],
# 'tests': [],
# }])
COMPLEX = {
'root': None,
'rootid': '.',
'parents': [
#
{'id': fix_path('./tests'),
'kind': 'folder',
'name': 'tests',
'parentid': '.',
},
# +++
{'id': fix_path('./tests/test_42-43.py'),
'kind': 'file',
'name': 'test_42-43.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_42.py'),
'kind': 'file',
'name': 'test_42.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_doctest.txt'),
'kind': 'file',
'name': 'test_doctest.txt',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_foo.py'),
'kind': 'file',
'name': 'test_foo.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_mixed.py'),
'kind': 'file',
'name': 'test_mixed.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite'),
'kind': 'suite',
'name': 'TestMySuite',
'parentid': fix_path('./tests/test_mixed.py'),
},
# +++
{'id': fix_path('./tests/test_pytest.py'),
'kind': 'file',
'name': 'test_pytest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam'),
'kind': 'suite',
'name': 'TestParam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam'),
'kind': 'suite',
'name': 'TestSpam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
'kind': 'suite',
'name': 'TestHam',
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param'),
'kind': 'function',
'name': 'test_fixture_param',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01'),
'kind': 'function',
'name': 'test_param_01',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11'),
'kind': 'function',
'name': 'test_param_11',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers'),
'kind': 'function',
'name': 'test_param_13_markers',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
'kind': 'function',
'name': 'test_param_13_repeat',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
'kind': 'function',
'name': 'test_param_13_skipped',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13'),
'kind': 'function',
'name': 'test_param_23_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises'),
'kind': 'function',
'name': 'test_param_23_raises',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33'),
'kind': 'function',
'name': 'test_param_33',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids'),
'kind': 'function',
'name': 'test_param_33_ids',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture'),
'kind': 'function',
'name': 'test_param_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
'kind': 'function',
'name': 'test_param_mark_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
# +++
{'id': fix_path('./tests/test_pytest_param.py'),
'kind': 'file',
'name': 'test_pytest_param.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
# +++
{'id': fix_path('./tests/test_unittest.py'),
'kind': 'file',
'name': 'test_unittest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests'),
'kind': 'suite',
'name': 'OtherTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
##
{'id': fix_path('./tests/v'),
'kind': 'folder',
'name': 'v',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/v/test_eggs.py'),
'kind': 'file',
'name': 'test_eggs.py',
'parentid': fix_path('./tests/v'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple'),
'kind': 'suite',
'name': 'TestSimple',
'parentid': fix_path('./tests/v/test_eggs.py'),
},
## +++
{'id': fix_path('./tests/v/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/v'),
},
## +++
{'id': fix_path('./tests/v/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/v'),
},
##
{'id': fix_path('./tests/w'),
'kind': 'folder',
'name': 'w',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/w/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/w'),
},
## +++
{'id': fix_path('./tests/w/test_spam_ex.py'),
'kind': 'file',
'name': 'test_spam_ex.py',
'parentid': fix_path('./tests/w'),
},
##
{'id': fix_path('./tests/x'),
'kind': 'folder',
'name': 'x',
'parentid': fix_path('./tests'),
},
###
{'id': fix_path('./tests/x/y'),
'kind': 'folder',
'name': 'y',
'parentid': fix_path('./tests/x'),
},
####
{'id': fix_path('./tests/x/y/z'),
'kind': 'folder',
'name': 'z',
'parentid': fix_path('./tests/x/y'),
},
#####
{'id': fix_path('./tests/x/y/z/a'),
'kind': 'folder',
'name': 'a',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/a/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/a'),
},
#####
{'id': fix_path('./tests/x/y/z/b'),
'kind': 'folder',
'name': 'b',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/b/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/b'),
},
#### +++
{'id': fix_path('./tests/x/y/z/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/x/y/z'),
},
],
'tests': [
##########
{'id': fix_path('./tests/test_42-43.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42-43.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42-43.py'),
},
#####
{'id': fix_path('./tests/test_42.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42.py'),
},
#####
{'id': fix_path('./tests/test_doctest.txt::test_doctest.txt'),
'name': 'test_doctest.txt',
'source': fix_path('./tests/test_doctest.txt:1'),
'markers': [],
'parentid': fix_path('./tests/test_doctest.txt'),
},
#####
{'id': fix_path('./tests/test_foo.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_foo.py:3'),
'markers': [],
'parentid': fix_path('./tests/test_foo.py'),
},
#####
{'id': fix_path('./tests/test_mixed.py::test_top_level'),
'name': 'test_top_level',
'source': fix_path('./tests/test_mixed.py:5'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:9'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:16'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::TestMySuite'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:25'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
#####
{'id': fix_path('./tests/test_pytest.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_pytest.py:10'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_skipped'),
'name': 'test_runtime_skipped',
'source': fix_path('./tests/test_pytest.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_failed'),
'name': 'test_runtime_failed',
'source': fix_path('./tests/test_pytest.py:18'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_raises'),
'name': 'test_raises',
'source': fix_path('./tests/test_pytest.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:26'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_pytest.py:31'),
'markers': ['skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_pytest.py:36'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_warned'),
'name': 'test_warned',
'source': fix_path('./tests/test_pytest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_custom_marker'),
'name': 'test_custom_marker',
'source': fix_path('./tests/test_pytest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_multiple_markers'),
'name': 'test_multiple_markers',
'source': fix_path('./tests/test_pytest.py:51'),
'markers': ['expected-failure', 'skip', 'skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_1'),
'name': 'test_dynamic_1',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_2'),
'name': 'test_dynamic_2',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_3'),
'name': 'test_dynamic_3',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:70'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:73'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:81'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:93'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01[]'),
'name': 'test_param_01[]',
'source': fix_path('./tests/test_pytest.py:103'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_01'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11[x0]'),
'name': 'test_param_11[x0]',
'source': fix_path('./tests/test_pytest.py:108'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_11'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x0]'),
'name': 'test_param_13_repeat[x0]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x1]'),
'name': 'test_param_13_repeat[x1]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x2]'),
'name': 'test_param_13_repeat[x2]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[1-1-1]'),
'name': 'test_param_33[1-1-1]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[3-4-5]'),
'name': 'test_param_33[3-4-5]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[0-0-0]'),
'name': 'test_param_33[0-0-0]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v1]'),
'name': 'test_param_33_ids[v1]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v2]'),
'name': 'test_param_33_ids[v2]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v3]'),
'name': 'test_param_33_ids[v3]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z0]'),
'name': 'test_param_23_13[1-1-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z1]'),
'name': 'test_param_23_13[1-1-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z2]'),
'name': 'test_param_23_13[1-1-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z0]'),
'name': 'test_param_23_13[3-4-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z1]'),
'name': 'test_param_23_13[3-4-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z2]'),
'name': 'test_param_23_13[3-4-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z0]'),
'name': 'test_param_23_13[0-0-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z1]'),
'name': 'test_param_23_13[0-0-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z2]'),
'name': 'test_param_23_13[0-0-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[x0]'),
'name': 'test_param_13_markers[x0]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[???]'),
'name': 'test_param_13_markers[???]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[2]'),
'name': 'test_param_13_markers[2]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x0]'),
'name': 'test_param_13_skipped[x0]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x1]'),
'name': 'test_param_13_skipped[x1]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x2]'),
'name': 'test_param_13_skipped[x2]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1-None]'),
'name': 'test_param_23_raises[1-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1.0-None]'),
'name': 'test_param_23_raises[1.0-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[2-catch2]'),
'name': 'test_param_23_raises[2-catch2]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:164'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture'),
'name': 'test_fixture',
'source': fix_path('./tests/test_pytest.py:192'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_mark_fixture'),
'name': 'test_mark_fixture',
'source': fix_path('./tests/test_pytest.py:196'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x0]'),
'name': 'test_param_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x1]'),
'name': 'test_param_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x2]'),
'name': 'test_param_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x0]'),
'name': 'test_param_mark_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x1]'),
'name': 'test_param_mark_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x2]'),
'name': 'test_param_mark_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[spam]'),
'name': 'test_fixture_param[spam]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[eggs]'),
'name': 'test_fixture_param[eggs]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
######
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
######
{'id': fix_path('./tests/test_unittest.py::MyTests::test_dynamic_'),
'name': 'test_dynamic_',
'source': fix_path('./tests/test_unittest.py:54'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_unittest.py:34'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_unittest.py:37'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_not_skipped'),
'name': 'test_maybe_not_skipped',
'source': fix_path('./tests/test_unittest.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_unittest.py:13'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_unittest.py:9'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped_inside'),
'name': 'test_skipped_inside',
'source': fix_path('./tests/test_unittest.py:21'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_nested_subtests'),
'name': 'test_with_nested_subtests',
'source': fix_path('./tests/test_unittest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_subtests'),
'name': 'test_with_subtests',
'source': fix_path('./tests/test_unittest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:61'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::OtherTests'),
},
###########
{'id': fix_path('./tests/v/test_eggs.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:8'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py::TestSimple'),
},
######
{'id': fix_path('./tests/v/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
{'id': fix_path('./tests/v/test_ham.py::test_not_hard'),
'name': 'test_not_hard',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
######
{'id': fix_path('./tests/v/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
{'id': fix_path('./tests/v/test_spam.py::test_simpler'),
'name': 'test_simpler',
'source': fix_path('./tests/v/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
###########
{'id': fix_path('./tests/w/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam.py'),
},
{'id': fix_path('./tests/w/test_spam_ex.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam_ex.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam_ex.py'),
},
###########
{'id': fix_path('./tests/x/y/z/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/test_ham.py:2'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/test_ham.py'),
},
######
{'id': fix_path('./tests/x/y/z/a/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/a/test_spam.py:11'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/a/test_spam.py'),
},
{'id': fix_path('./tests/x/y/z/b/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/b/test_spam.py:7'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/b/test_spam.py'),
},
],
}
| [((18, 6, 18, 17), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((20, 9, 20, 59), 'os.path.join', 'os.path.join', ({(20, 22, 20, 40): 'TESTING_TOOLS_ROOT', (20, 42, 20, 58): '"""run_adapter.py"""'}, {}), "(TESTING_TOOLS_ROOT, 'run_adapter.py')", False, 'import os\n'), ((19, 24, 19, 49), 'os.path.dirname', 'os.path.dirname', ({(19, 40, 19, 48): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((24, 15, 24, 43), 'os.path.join', 'os.path.join', ({(24, 28, 24, 36): 'DATA_DIR', (24, 38, 24, 42): 'name'}, {}), '(DATA_DIR, name)', False, 'import os\n'), ((49, 11, 51, 42), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((25, 21, 25, 52), 'os.path.join', 'os.path.join', ({(25, 34, 25, 42): 'projroot', (25, 44, 25, 51): '"""tests"""'}, {}), "(projroot, 'tests')", False, 'import os\n'), ((100, 17, 100, 32), 'json.loads', 'json.loads', ({(100, 28, 100, 31): 'out'}, {}), '(out)', False, 'import json\n'), ((144, 17, 144, 32), 'json.loads', 'json.loads', ({(144, 28, 144, 31): 'out'}, {}), '(out)', False, 'import json\n'), ((214, 17, 214, 32), 'json.loads', 'json.loads', ({(214, 28, 214, 31): 'out'}, {}), '(out)', False, 'import json\n'), ((226, 17, 226, 32), 'json.loads', 'json.loads', ({(226, 28, 226, 31): 'out'}, {}), '(out)', False, 'import json\n')] |
axdahl/SC-MMGP | mmgp/kernels/wavelet_slice.py | c6cd9d9de66bb7074925a4b6485f10a74bdd9f68 | '''
Wavelet kernel
slice allows kernel operation on feature subset
active_dims is iterable of feature dimensions to extract
input_dim must equal dimension defined by active_dims
'''
import numpy as np
import tensorflow as tf
from .. import util
from . import kernel
from .kernel_extras import *
class WaveletSlice(kernel.Kernel):
def __init__(self, input_dim, active_dims=None, shift=0, scale = 0.01,
white=0.01, input_scaling=False):
if input_scaling:
self.shift = tf.Variable(shift * tf.ones([input_dim]))
self.scale = tf.Variable(scale * tf.ones([input_dim]))
else:
self.shift = tf.Variable([shift], dtype=tf.float32)
self.scale = tf.Variable([scale], dtype=tf.float32)
self.input_dim = input_dim
self.active_dims = active_dims
self.white = white
def kernel(self, points1, points2=None):
if points2 is None:
points2 = points1
white_noise = (self.white * util.eye(tf.shape(points1)[0]) +
0.1 * self.white * tf.ones( [tf.shape(points1)[0], tf.shape(points1)[0]]))
else:
white_noise = 0.01 * self.white * tf.ones( [tf.shape(points1)[0], tf.shape(points2)[0]] )
points1, points2 = dim_slice(self, points1, points2)
def h(x):
# Zhang wavelet
#return tf.cos(1.75*x)*tf.exp(-0.5*x**2)
# mexican hat wavelet
return (1-x**2)*tf.exp(-0.5*x**2)
kern1, kern2 = h((points1 - self.shift)/tf.exp(self.scale)), h((points2 - self.shift)/tf.exp(self.scale))
kern1, kern2 = tf.reduce_prod(kern1, axis=1), tf.reduce_prod(kern2, axis=1)
kern = tf.einsum('i,j->ij', kern1, kern2)
return kern + white_noise
def diag_kernel(self, points):
def h(x):
# Zhang wavelet
return tf.cos(1.75*x)*tf.exp(-0.5*x**2)
# mexican hat wavelet
#return (1-x**2)*tf.exp(-0.5*x**2)
points = dim_slice_diag(self, points)
kern = tf.reduce_prod(h((points - self.shift)/tf.exp(self.scale)) , axis=1) **2
return kern + self.white
def get_params(self):
return [self.shift, self.scale]
| [((48, 15, 48, 49), 'tensorflow.einsum', 'tf.einsum', ({(48, 25, 48, 34): '"""i,j->ij"""', (48, 36, 48, 41): 'kern1', (48, 43, 48, 48): 'kern2'}, {}), "('i,j->ij', kern1, kern2)", True, 'import tensorflow as tf\n'), ((24, 25, 24, 63), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((25, 25, 25, 63), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((47, 23, 47, 52), 'tensorflow.reduce_prod', 'tf.reduce_prod', (), '', True, 'import tensorflow as tf\n'), ((47, 54, 47, 83), 'tensorflow.reduce_prod', 'tf.reduce_prod', (), '', True, 'import tensorflow as tf\n'), ((44, 28, 44, 45), 'tensorflow.exp', 'tf.exp', ({(44, 35, 44, 44): '(-0.5 * x ** 2)'}, {}), '(-0.5 * x ** 2)', True, 'import tensorflow as tf\n'), ((54, 19, 54, 33), 'tensorflow.cos', 'tf.cos', ({(54, 26, 54, 32): '(1.75 * x)'}, {}), '(1.75 * x)', True, 'import tensorflow as tf\n'), ((54, 34, 54, 51), 'tensorflow.exp', 'tf.exp', ({(54, 41, 54, 50): '(-0.5 * x ** 2)'}, {}), '(-0.5 * x ** 2)', True, 'import tensorflow as tf\n'), ((21, 45, 21, 65), 'tensorflow.ones', 'tf.ones', ({(21, 53, 21, 64): '[input_dim]'}, {}), '([input_dim])', True, 'import tensorflow as tf\n'), ((22, 45, 22, 65), 'tensorflow.ones', 'tf.ones', ({(22, 53, 22, 64): '[input_dim]'}, {}), '([input_dim])', True, 'import tensorflow as tf\n'), ((46, 48, 46, 66), 'tensorflow.exp', 'tf.exp', ({(46, 55, 46, 65): 'self.scale'}, {}), '(self.scale)', True, 'import tensorflow as tf\n'), ((46, 94, 46, 112), 'tensorflow.exp', 'tf.exp', ({(46, 101, 46, 111): 'self.scale'}, {}), '(self.scale)', True, 'import tensorflow as tf\n'), ((59, 54, 59, 72), 'tensorflow.exp', 'tf.exp', ({(59, 61, 59, 71): 'self.scale'}, {}), '(self.scale)', True, 'import tensorflow as tf\n'), ((33, 49, 33, 66), 'tensorflow.shape', 'tf.shape', ({(33, 58, 33, 65): 'points1'}, {}), '(points1)', True, 'import tensorflow as tf\n'), ((36, 56, 36, 73), 'tensorflow.shape', 'tf.shape', ({(36, 65, 36, 72): 'points1'}, {}), '(points1)', True, 'import tensorflow as tf\n'), ((36, 78, 36, 95), 'tensorflow.shape', 'tf.shape', ({(36, 87, 36, 94): 'points2'}, {}), '(points2)', True, 'import tensorflow as tf\n'), ((34, 45, 34, 62), 'tensorflow.shape', 'tf.shape', ({(34, 54, 34, 61): 'points1'}, {}), '(points1)', True, 'import tensorflow as tf\n'), ((34, 67, 34, 84), 'tensorflow.shape', 'tf.shape', ({(34, 76, 34, 83): 'points1'}, {}), '(points1)', True, 'import tensorflow as tf\n')] |
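In the kernel above, each (sliced) input point is reduced to a scalar by taking the product of mexican-hat wavelet responses across its feature dimensions, and the kernel matrix is the outer product of those scalars plus a white-noise term. A NumPy re-expression of that computation (white noise omitted, shift/scale as plain floats rather than tf.Variables) might look like this:

import numpy as np

def mexican_hat(x):
    return (1.0 - x**2) * np.exp(-0.5 * x**2)

def wavelet_kernel(points1, points2, shift=0.0, scale=0.01):
    # per-point scalar: product of wavelet responses over feature dimensions
    k1 = np.prod(mexican_hat((points1 - shift) / np.exp(scale)), axis=1)
    k2 = np.prod(mexican_hat((points2 - shift) / np.exp(scale)), axis=1)
    # rank-one kernel matrix, matching tf.einsum('i,j->ij', kern1, kern2)
    return np.outer(k1, k2)

K = wavelet_kernel(np.random.randn(5, 3), np.random.randn(4, 3))  # shape (5, 4)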
latenite4/python3 | transform.py | 30e367471ba48e5fc0fb07327b636fcb9959e3e0 |
#!/usr/bin/python3
#program to parse png images and change images
# cmd: python3 transform.py
# you must have local input/ and output/ directories
#
# name: R. Melton
# date: 12/27/20
# cmdline: python transform.py cmd show image='city.png' --ulx=1 --uly=2 --brx=0 --bry=9
# python transform.py show city.png
# python transform.py blur city.png
from image import Image
import numpy as np
import time, os, argparse, string
#from tkinter import *
import imghdr
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def adjust_brightness(image,factor):
#scale each value by some amount
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = image.array[x,y,c] * factor #non vectorized version
#vectorized version
# new_im.array = image.array * factor -# this is faster
return new_im
#adjust the contrast by increasing difference from user
#defined midpoint
def adjust_contrast(image, factor, mid=0.5):
x_pixels, y_pixels,num_channels = image.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x,y,c] = (image.array[x,y,c] -mid)* factor + mid #non vectorized version
#vectorized version
# new_im.array = (image.array - mid) * factor + mid
return new_im
# blur and image
def blur(image, k_size):
#k_size is the number of pixels to use when doing the blur
#k_size=3 would be above and below and left neighbor, right neighbor pixels, and diagonal
#neighbor pixels.
im = Image(filename = image)
x_pixels, y_pixels,num_channels = im.array.shape
new_im = Image(x_pixels=x_pixels,y_pixels=y_pixels,num_channels=num_channels)
neighbor_range = k_size // 2
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
total += image.array[x_i, y_i, c]
new_im.array[x,y,c] = total / (k_size **2) # average for kernel size in image
return new_im
def apply_kernel(image, kernel):
# the kernel should be a 2D array that represents the kernel we'll use!
# for the sake of simiplicity of this implementation, let's assume that the kernel is SQUARE
# for example the sobel x kernel (detecting horizontal edges) is as follows:
# [1 0 -1]
# [2 0 -2]
# [1 0 -1]
x_pixels, y_pixels, num_channels = image.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
neighbor_range = kernel.shape[0] // 2 # this is a variable that tells us how many neighbors we actually look at (ie for a 3x3 kernel, this value should be 1)
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
total = 0
for x_i in range(max(0,x-neighbor_range), min(new_im.x_pixels-1, x+neighbor_range)+1):
for y_i in range(max(0,y-neighbor_range), min(new_im.y_pixels-1, y+neighbor_range)+1):
x_k = x_i + neighbor_range - x
y_k = y_i + neighbor_range - y
kernel_val = kernel[x_k, y_k]
total += image.array[x_i, y_i, c] * kernel_val
new_im.array[x, y, c] = total
return new_im
def combine_images(image1, image2):
# let's combine two images using the squared sum of squares: value = sqrt(value_1**2, value_2**2)
# size of image1 and image2 MUST be the same
x_pixels, y_pixels, num_channels = image1.array.shape # represents x, y pixels of image, # channels (R, G, B)
new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels) # making a new array to copy values to!
for x in range(x_pixels):
for y in range(y_pixels):
for c in range(num_channels):
new_im.array[x, y, c] = (image1.array[x, y, c]**2 + image2.array[x, y, c]**2)**0.5
return new_im
def show_image(in_image):
path="input/"
img = mpimg.imread(path+in_image)
imgplot = plt.imshow(img)
plt.show()
# check for necessary parts of the runtime environment
def check_env( in_image):
#check to verify that output/input dirs exist:
path = './output/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./output dir must exist, cannot continue...')
print(quit)
quit()
#verify output is writeable
is_w = os.access(path, os.W_OK)
if not is_w:
print('local ./output dir must be writeable, cannot continue...')
print(quit)
quit()
path = './input/'
is_path = os.path.isdir(path)
if not is_path:
print('local ./input dir must exist, cannot continue...')
print(quit)
quit()
#verify input image
if in_image:
thefile = 'input/'+in_image
print('file path: '+thefile)
is_file = os.path.isfile(thefile)
if not is_file:
print(f'local ./input file {in_image} must exist, cannot continue...')
print(quit)
quit()
if imghdr.what(thefile) != 'png':
print('wrong image file type, cannot continue...')
print(quit)
quit()
def cmd():
print("routine cmd")
# setup command line args and parms
# optional args have --
# fixed (required args do not have --)
def arg_init():
parser = argparse.ArgumentParser(description='Process an image.')
parser.add_argument("cmd",help="command to this program",type=str)
parser.add_argument("image",help="input image name for the command",type=str)
parser.add_argument("--ulx",action='store_true',help="upperleft x in image")
parser.add_argument("--uly",action='store_true',help="upperleft y in image")
parser.add_argument("--brx",action='store_true',help="bottomright x in image")
parser.add_argument("--bry",action='store_true',help="bottomright y in image")
group = parser.add_mutually_exclusive_group()
group.add_argument('--v', action='store_true',help="add more text output")
group.add_argument('--q', action='store_true',help="minimal output")
args = parser.parse_args()
print(args.image)
#if args.cmd != "show" and args.cmd != "blur":
return args
#def show_image(filename):
if __name__ == '__main__':
args = arg_init()
check_env(args.image)
lake = Image(filename = 'lake.png')
city = Image(filename='city.png')
start_time = time.time()
# brightened_im = adjust_brightness(lake, 1.7)
# brightened_im.write_image('brightened.png')
# darkened_im = adjust_brightness(lake, 0.3)
# darkened_im.write_image('darkened.png')
# incr_contrast = adjust_contrast(lake, 2,0.5)
# incr_contrast.write_image('incr_contrast.png')
# decr_contrast = adjust_contrast(lake, 0.5,0.5)
# decr_contrast.write_image('decr_contrast.png')
# blur_3 = blur(city,3)
# blur_3.write_image('blur_k3.png')
# blur_15 = blur(city,15)
# blur_15.write_image('blur_k15.png')
# let's apply a sobel kernel on the x and y axis
# sobel_x = apply_kernel(city, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# sobel_x.write_image('edge_x.png')
# sobel_y = apply_kernel(city, np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
# sobel_y.write_image('edge_y.png')
# # this will show x and y edges
# sobel_xy = combine_images(sobel_x, sobel_y)
# sobel_xy.write_image('edge_xy.png')
if args.cmd == "show" and args.image:
show_image(args.image)
if args.cmd == "blur" and args.image:
blur_15 = blur(args.image,15)
blur_15.write_image(args.image+'blur_k15.png')
show_image(blur_k15.png)
if args.v:
print(f'total execution duration: {time.time() - start_time}s')
| [((24, 11, 24, 79), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((39, 11, 39, 79), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((55, 7, 55, 30), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((58, 11, 58, 79), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((80, 13, 80, 83), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((100, 13, 100, 83), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((110, 8, 110, 35), 'matplotlib.image.imread', 'mpimg.imread', ({(110, 21, 110, 34): 'path + in_image'}, {}), '(path + in_image)', True, 'import matplotlib.image as mpimg\n'), ((111, 12, 111, 27), 'matplotlib.pyplot.imshow', 'plt.imshow', ({(111, 23, 111, 26): 'img'}, {}), '(img)', True, 'import matplotlib.pyplot as plt\n'), ((112, 2, 112, 12), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((118, 12, 118, 31), 'os.path.isdir', 'os.path.isdir', ({(118, 26, 118, 30): 'path'}, {}), '(path)', False, 'import time, os, argparse, string\n'), ((124, 9, 124, 33), 'os.access', 'os.access', ({(124, 19, 124, 23): 'path', (124, 25, 124, 32): 'os.W_OK'}, {}), '(path, os.W_OK)', False, 'import time, os, argparse, string\n'), ((131, 12, 131, 31), 'os.path.isdir', 'os.path.isdir', ({(131, 26, 131, 30): 'path'}, {}), '(path)', False, 'import time, os, argparse, string\n'), ((160, 11, 160, 67), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import time, os, argparse, string\n'), ((190, 9, 190, 37), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((191, 9, 191, 35), 'image.Image', 'Image', (), '', False, 'from image import Image\n'), ((192, 15, 192, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, os, argparse, string\n'), ((141, 14, 141, 37), 'os.path.isfile', 'os.path.isfile', ({(141, 29, 141, 36): 'thefile'}, {}), '(thefile)', False, 'import time, os, argparse, string\n'), ((147, 5, 147, 25), 'imghdr.what', 'imghdr.what', ({(147, 17, 147, 24): 'thefile'}, {}), '(thefile)', False, 'import imghdr\n'), ((228, 39, 228, 50), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, os, argparse, string\n')] |
Jeglet/pcbot | plugins/wyr.py | 89178d4982151adb2fadfacdc3080e46cda9e891 | """ Would you rather? This plugin includes would you rather functionality
"""
import asyncio
import random
import re
import discord
import bot
import plugins
from pcbot import Config
client = plugins.client # type: bot.Client
db = Config("would-you-rather", data=dict(timeout=10, responses=["**{name}** would **{choice}**!"], questions=[]),
pretty=True)
command_pattern = re.compile(r"(.+)(?:\s+or|\s*,)\s+([^?]+)\?*")
sessions = set() # All running would you rather's are in this set
@plugins.argument("{open}option ...{close} or/, {open}other option ...{close}[?]", allow_spaces=True)
async def options(arg):
""" Command argument for receiving two options. """
match = command_pattern.match(arg)
assert match
assert not match.group(1).lower() == match.group(2).lower(), "**The choices cannot be the same.**"
return match.group(1), match.group(2)
def get_choice(choices: list, choice: str):
""" Get the chosen option. This accept 1 and 2 as numbers. """
if choice == "1":
return 0
if choice == "2":
return 1
choices = list(map(str.lower, choices))
words = list(map(str.split, choices))
# Go through all words in the given message, and find any words unique to a choice
for word in choice.lower().split():
if word in words[0] and word not in words[1]:
return 0
elif word in words[1] and word not in words[0]:
return 1
# Invalid choice
return None
@plugins.command(aliases="wyr rather either")
async def wouldyourather(message: discord.Message, opt: options = None):
""" Ask the bot if he would rather, or have the bot ask you.
**Examples:**
Registering a choice: `!wouldyourather lie or be lied to`
Asking the bot: `!wouldyourather`"""
# If there are no options, the bot will ask the questions (if there are any to choose from)
if opt is None:
assert message.channel.id not in sessions, "**A would you rather session is already in progress.**"
sessions.add(message.channel.id)
assert db.data["questions"], "**There are ZERO questions saved. Ask me one!**"
question = random.choice(db.data["questions"])
choices = question["choices"]
await client.say(message, "Would you rather **{}** or **{}**?".format(*choices))
timeout = db.data["timeout"]
replied = []
# Wait for replies from anyone in the channel
while True:
def check(m):
return m.channel == message.channel and m.author not in replied
try:
reply = await client.wait_for_message(timeout=timeout, check=check)
# Break on timeout
except asyncio.TimeoutError:
break
# Check if the choice is valid
choice = get_choice(choices, reply.content)
if choice is None:
continue
# Register that this author has replied
replied.append(reply.author)
# Update the answers in the DB
# We don't care about multiples, just the amount (yes it will probably be biased)
question["answers"][choice] += 1
name = reply.author.display_name
response = random.choice(db.data["responses"]).format(name=name, NAME=name.upper(),
choice=choices[choice])
await client.say(message, response)
# Say the total tallies
await client.say(message, "A total of {0} would **{2}**, while {1} would **{3}**!".format(
*question["answers"], *choices))
await db.asyncsave()
sessions.remove(message.channel.id)
# Otherwise, the member asked a question to the bot
else:
db.data["questions"].append(dict(
choices=list(opt),
answers=[0, 0]
))
await db.asyncsave()
answer = random.choice(opt)
await client.say(message, "**I would {}**!".format(answer))
@wouldyourather.command(aliases="delete", owner=True)
async def remove(message: discord.Message, opt: options):
""" Remove a wouldyourather question with the given options. """
for q in db.data["questions"]:
if q["choices"][0] == opt[0] and q["choices"][1] == opt[1]:
db.data["questions"].remove(q)
await db.asyncsave()
await client.say(message, "**Entry removed.**")
break
else:
await client.say(message, "**Could not find the question.**")
| [((17, 18, 17, 64), 're.compile', 're.compile', ({(17, 29, 17, 63): '"""(.+)(?:\\\\s+or|\\\\s*,)\\\\s+([^?]+)\\\\?*"""'}, {}), "('(.+)(?:\\\\s+or|\\\\s*,)\\\\s+([^?]+)\\\\?*')", False, 'import re\n'), ((21, 1, 21, 101), 'plugins.argument', 'plugins.argument', (), '', False, 'import plugins\n'), ((53, 1, 53, 45), 'plugins.command', 'plugins.command', (), '', False, 'import plugins\n'), ((69, 19, 69, 54), 'random.choice', 'random.choice', ({(69, 33, 69, 53): "db.data['questions']"}, {}), "(db.data['questions'])", False, 'import random\n'), ((118, 17, 118, 35), 'random.choice', 'random.choice', ({(118, 31, 118, 34): 'opt'}, {}), '(opt)', False, 'import random\n'), ((100, 23, 100, 58), 'random.choice', 'random.choice', ({(100, 37, 100, 57): "db.data['responses']"}, {}), "(db.data['responses'])", False, 'import random\n')] |
bergzand/suit-manifest-generator | suit_tool/argparser.py | da82651a8b02fd4d7261e826cc70b5c862dd94ea | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2019-2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import sys, argparse, os
from suit_tool import __version__
from suit_tool import keygen
from suit_tool import get_pubkey
import json
import re
def str_to_component(s):
types = {
'file' : ('file', lambda x : str(x.strip('"'))),
# 'desc' : ('component-description', lambda x : str(x.strip('"'))),
'inst' : ('install-id', lambda x : [ str(y) for y in eval(x) ]),
'uri' : ('uri', lambda x : str(x.strip('"')))
}
d = {types[k][0]:types[k][1](v) for k,v in [ re.split(r'=',e, maxsplit=1) for e in re.split(r''',\s*(?=["']?[a-zA-Z0-9_-]+["']?=)''', s)]}
return d
class MainArgumentParser(object):
def __init__(self):
self.parser = self._make_parser()
def _make_parser(self):
parser = argparse.ArgumentParser(description = 'Create or transform a manifest.'
' Use {} [command] -h for help on each command.'.format(sys.argv[0]))
# Add all top-level commands
parser.add_argument('-l', '--log-level', choices=['debug','info','warning','exception'], default='info',
help='Set the verbosity level of console output.')
parser.add_argument('--version', action='version', version=__version__,
help='display the version'
)
subparsers = parser.add_subparsers(dest="action")
subparsers.required = True
create_parser = subparsers.add_parser('create', help='Create a new manifest')
# create_parser.add_argument('-v', '--manifest-version', choices=['1'], default='1')
create_parser.add_argument('-i', '--input-file', metavar='FILE', type=argparse.FileType('r'),
help='An input file describing the update. The file must be formated as JSON. The overal structure is described in README.')
create_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
create_parser.add_argument('-f', '--format', metavar='FMT', choices=['suit', 'suit-debug', 'json'], default='suit')
create_parser.add_argument('-s', '--severable', action='store_true', help='Convert large elements to severable fields.')
create_parser.add_argument('-c', '--add-component', action='append', type=str_to_component, dest='components', default=[])
sign_parser = subparsers.add_parser('sign', help='Sign a manifest')
sign_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
sign_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
sign_parser.add_argument('-i', '--key-id', metavar='ID', type=str)
sign_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
parse_parser = subparsers.add_parser('parse', help='Parse a manifest')
parse_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
parse_parser.add_argument('-j', '--json-output', default=False, action='store_true', dest='json')
get_pubkey_parser = subparsers.add_parser('pubkey', help='Get the public key for a supplied private key.')
get_pubkey_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
get_pubkey_parser.add_argument('-f', '--output-format', choices=get_pubkey.OutputFormaters.keys(), default='pem')
get_pubkey_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)
keygen_parser = subparsers.add_parser('keygen', help='Create a signing key. Not for production use')
keygen_parser.add_argument('-t', '--type', choices=keygen.KeyGenerators.keys(),
default='secp256r1', help='The type of the key to generate')
keygen_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)
keygen_parser.add_argument('-f', '--output-format', choices=keygen.OutputFormaters.keys(), default='pem')
keygen_parser.add_argument('-l', '--levels', help='The number of hss-lms levels', type=int, default=2)
sever_parser = subparsers.add_parser('sever', help='Remove one or more severable elements from the manifest, if present.')
sever_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
sever_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
sever_parser.add_argument('-e', '--element', action='append', type=str, dest='elements', default=[])
sever_parser.add_argument('-a', '--all', action='store_true', default=False)
return parser
def parse_args(self, args=None):
self.options = self.parser.parse_args(args)
return self
| [((33, 49, 33, 77), 're.split', 're.split', (), '', False, 'import re\n'), ((56, 78, 56, 100), 'argparse.FileType', 'argparse.FileType', ({(56, 96, 56, 99): '"""r"""'}, {}), "('r')", False, 'import sys, argparse, os\n'), ((58, 79, 58, 102), 'argparse.FileType', 'argparse.FileType', ({(58, 97, 58, 101): '"""wb"""'}, {}), "('wb')", False, 'import sys, argparse, os\n'), ((65, 74, 65, 97), 'argparse.FileType', 'argparse.FileType', ({(65, 92, 65, 96): '"""rb"""'}, {}), "('rb')", False, 'import sys, argparse, os\n'), ((66, 77, 66, 100), 'argparse.FileType', 'argparse.FileType', ({(66, 95, 66, 99): '"""rb"""'}, {}), "('rb')", False, 'import sys, argparse, os\n'), ((68, 77, 68, 100), 'argparse.FileType', 'argparse.FileType', ({(68, 95, 68, 99): '"""wb"""'}, {}), "('wb')", False, 'import sys, argparse, os\n'), ((72, 75, 72, 98), 'argparse.FileType', 'argparse.FileType', ({(72, 93, 72, 97): '"""rb"""'}, {}), "('rb')", False, 'import sys, argparse, os\n'), ((77, 83, 77, 106), 'argparse.FileType', 'argparse.FileType', ({(77, 101, 77, 105): '"""rb"""'}, {}), "('rb')", False, 'import sys, argparse, os\n'), ((78, 72, 78, 105), 'suit_tool.get_pubkey.OutputFormaters.keys', 'get_pubkey.OutputFormaters.keys', ({}, {}), '()', False, 'from suit_tool import get_pubkey\n'), ((79, 83, 79, 106), 'argparse.FileType', 'argparse.FileType', ({(79, 101, 79, 105): '"""wb"""'}, {}), "('wb')", False, 'import sys, argparse, os\n'), ((83, 59, 83, 86), 'suit_tool.keygen.KeyGenerators.keys', 'keygen.KeyGenerators.keys', ({}, {}), '()', False, 'from suit_tool import keygen\n'), ((85, 79, 85, 102), 'argparse.FileType', 'argparse.FileType', ({(85, 97, 85, 101): '"""wb"""'}, {}), "('wb')", False, 'import sys, argparse, os\n'), ((86, 68, 86, 97), 'suit_tool.keygen.OutputFormaters.keys', 'keygen.OutputFormaters.keys', ({}, {}), '()', False, 'from suit_tool import keygen\n'), ((90, 75, 90, 98), 'argparse.FileType', 'argparse.FileType', ({(90, 93, 90, 97): '"""rb"""'}, {}), "('rb')", False, 'import sys, argparse, os\n'), ((91, 78, 91, 101), 'argparse.FileType', 'argparse.FileType', ({(91, 96, 91, 100): '"""wb"""'}, {}), "('wb')", False, 'import sys, argparse, os\n'), ((33, 87, 33, 140), 're.split', 're.split', ({(33, 96, 33, 136): '""",\\\\s*(?=["\']?[a-zA-Z0-9_-]+["\']?=)"""', (33, 138, 33, 139): 's'}, {}), '(\',\\\\s*(?=["\\\']?[a-zA-Z0-9_-]+["\\\']?=)\', s)', False, 'import re\n')] |
y2ghost/study | python/process/process_pool.py | c5278611b0a732fe19e3d805c0c079e530b1d3b2 | import random
import time
from multiprocessing import Pool
def worker(name: str) -> None:
print(f'Started worker {name}')
worker_time = random.choice(range(1, 5))
time.sleep(worker_time)
print(f'{name} worker finished in {worker_time} seconds')
if __name__ == '__main__':
process_names = [f'computer_{i}' for i in range(15)]
pool = Pool(processes=5)
pool.map(worker, process_names)
# pool.terminate()
| [((9, 4, 9, 27), 'time.sleep', 'time.sleep', ({(9, 15, 9, 26): 'worker_time'}, {}), '(worker_time)', False, 'import time\n'), ((15, 11, 15, 28), 'multiprocessing.Pool', 'Pool', (), '', False, 'from multiprocessing import Pool\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:48:59 2020
@author: mazal
"""
"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commissioning mode
testMode = False
reportMode = False
"""
testMode = False
reportMode = False
"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build an aleatory (random) sample of the Kaggle competition train dataset given a sampling size
Raw code reference (see Tester.py): Test 5
"""
def trainDatasetSampler(samplingSize,testMode,reportMode):
    # Sampling size is taken from the samplingSize argument (% of the train population)
# Build a Sampling dataset | Phase 1: Determine: (1) the source path of the train data; (2) the location path of the sampling
import os
import pandas as pd
path_source = 'Y:/Kaggle_OSIC/2-Data/train/'
path_source_test = 'Y:/Kaggle_OSIC/2-Data/test/'
path_destination = 'Y:/Kaggle_OSIC/4-Data (Sampling)/train/'
path_destination_test = 'Y:/Kaggle_OSIC/4-Data (Sampling)/test/'
path_destination_outcome = 'Y:/Kaggle_OSIC/4-Data (Sampling)/outcome/'
# Build a Sampling dataset | Phase 2: Build dataset using the following features from train data: (1) ID; (2) # of DICOM files per ID (including percentage).
## Improvement: (3) # of other registers (not related to DICOM files)
os.chdir(path_source)
ID_list = os.listdir(path_source)
ID_list_range = len(ID_list)
DICOMFile_list = []
DICOMFileNumber_list = []
for i in range(0,ID_list_range):
path_ID = path_source + ID_list[i] + '/'
DICOMFile_list_unitary = os.listdir(path_ID)
DICOMFile_list = DICOMFile_list + [DICOMFile_list_unitary]
DICOMFileNumber_list_unitary = len(DICOMFile_list_unitary)
DICOMFileNumber_list = DICOMFileNumber_list + [DICOMFileNumber_list_unitary]
Population_Dictionary = {'ID':ID_list,'NumberDicomFiles':DICOMFileNumber_list,'DicomFIles':DICOMFile_list}
Population_DataFrame = pd.DataFrame(data = Population_Dictionary)
DICOMFilePercentage_list = []
TotalNumberDicomFiles = sum(Population_DataFrame.NumberDicomFiles)
for j in range(0,ID_list_range):
Percentage = Population_DataFrame['NumberDicomFiles'][j] / TotalNumberDicomFiles * 100
Percentage = round(Percentage,6)
DICOMFilePercentage_list = DICOMFilePercentage_list + [Percentage]
Population_Percentage_Dictionary = {'Percentage':DICOMFilePercentage_list}
Population_Percentage_DataFrame = pd.DataFrame(data=Population_Percentage_Dictionary)
Population_DataFrame = pd.concat([Population_DataFrame, Population_Percentage_DataFrame],axis=1, sort=False)
filename_population = 'populationDataset.csv'
path_population = path_destination_outcome
Population_DataFrame.to_csv(path_population+filename_population)
# Build a Sampling dataset | Phase 3: Get an aleatory grouping of IDs (just tags)
import random
Population_DataFrame_IndexToSample=[]
Population_DataFrame_IDToSample=[]
Population_DataFrame_PercentageToSample=[]
samplingSizeGoal = 0
while (samplingSizeGoal <= samplingSize):
randomNumberTermination = len(Population_DataFrame.ID)
randomNumber = random.randrange(0,randomNumberTermination,1)
if (randomNumber not in Population_DataFrame_IndexToSample):
Population_DataFrame_IndexToSample = Population_DataFrame_IndexToSample + [randomNumber]
ID_unitary = Population_DataFrame.ID[randomNumber]
Population_DataFrame_IDToSample = Population_DataFrame_IDToSample + [ID_unitary]
Percentage_unitary = Population_DataFrame.Percentage[randomNumber]
Population_DataFrame_PercentageToSample = Population_DataFrame_PercentageToSample + [Percentage_unitary]
samplingSize_unitary = Population_DataFrame.Percentage[randomNumber]
samplingSizeGoal = samplingSizeGoal + samplingSize_unitary
samplingDataset_Dictionary = {'Index':Population_DataFrame_IndexToSample,'ID':Population_DataFrame_IDToSample,'Percentage':Population_DataFrame_PercentageToSample}
samplingDataset_DataFrame = pd.DataFrame(data=samplingDataset_Dictionary)
filename_sampling = 'samplingDataset.csv'
path_sampling = path_destination_outcome
samplingDataset_DataFrame.to_csv(path_sampling+filename_sampling)
# Build a Sampling dataset | Phase 3: Get train dataset (an aleatory grouping of IDs; tree-copy task)
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination)
create_tree(path_destination,[])
if testMode == True:
print("=========================================")
print("Building the Sampling Dataset given the Train Dataset of Kaggle for competition")
print("=========================================")
for k in Population_DataFrame_IDToSample:
path_source_unitary = path_source + k + '/'
path_destination_unitary = path_destination + k + '/'
create_tree(path_destination_unitary,[])
copy_tree(path_source_unitary,path_destination_unitary)
if testMode == True: print("ID tree copied: ",k)
# Build a Sampling dataset | Phase 4: Get test dataset (tree-copy task)
## Assumption: The complete test dataset is copied.
from distutils.dir_util import create_tree
from distutils.dir_util import remove_tree
from distutils.dir_util import copy_tree
remove_tree(path_destination_test)
create_tree(path_destination_test,[])
if testMode == True:
print("=========================================")
print("Building the Test Dataset given the Test Dataset of Kaggle for competition")
print("=========================================")
IDList_test = os.listdir(path_source_test)
for l in IDList_test:
path_source_unitary = path_source + l + '/'
path_destination_unitary = path_destination_test + l + '/'
create_tree(path_destination_unitary,[])
copy_tree(path_source_unitary,path_destination_unitary)
if testMode == True: print("ID tree copied: ",l)
if (testMode == False and reportMode == True):
from datetime import date
reportDate = date.today()
print("=========================================")
print("Function Report | Date:",reportDate.year,'/',reportDate.month,'/',reportDate.day,'/' )
print("=========================================")
print("Function: trainDatasetSampler(samplingSize,testMode)")
print("=========================================")
print("(1) Inputs")
print("=========================================")
print("-Sampling Size :", samplingSize, "%")
print("-Test Mode : False")
print("=========================================")
print("(2) Outputs")
print("=========================================")
print("-Type of sample: Aleatory based on IDs")
print("-Train dataset percentage to sample (base): ", round(abs(samplingSize),6),"%")
print("-Train dataset percentage to sample (adjustment): ", round(abs(samplingSizeGoal-samplingSize),6),"%")
print("-Train dataset percentage to sample (fitted): ", round(samplingSizeGoal,6),"%")
print("-Population of Train dataset (just information) available in file: ", filename_population)
print("-Sample of Train dataset (just information) available in file: ", filename_sampling)
print("=========================================")
print("(2) Outcomes:")
print("=========================================")
print("Being the outcome expressed under the variable result, outcomes are as follows:")
print("result[0] -> Dataframe for Population")
print("result[1] -> Dataframe for Sample")
print("result[2] -> Test Mode")
print("result[3] -> Rerport Mode")
print("=========================================")
return Population_DataFrame, samplingDataset_DataFrame, testMode, reportMode
if testMode == True:
samplingSize = 5
resultFunction1 = trainDatasetSampler(samplingSize,testMode,reportMode)
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[0])
print("=========================================")
print("Population dataset:")
print("=========================================")
print(resultFunction1[1])
print("=========================================")
print("Test result Function 1: Success")
print("=========================================")
"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file
Raw code reference (see Tester.py): Test 8
"""
def SubmissionBuilder(ProductType,filename,testMode):
import os
import pandas as pd
# Set ProductType
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    # Adjust the path according to ProductType
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set outcome
path_outcome = path_ProductType + 'outcome/'
# Get raw data as a DataFrame
os.chdir(path_outcome)
rawFile_DataFrame = pd.read_csv('submissionRawFile_2020_09_19.csv')
# Get submission file template as a DataFrame
os.chdir(path_ProductType)
submissionFile_DataFrame = pd.read_csv('sample_submission.csv')
# Get submission data as required in submission file
submissionNumber_range = len(rawFile_DataFrame.index)
IDcases_List = submissionFile_DataFrame.Patient_Week.copy()
IDcases_List = IDcases_List[0:5]
IDcases_List_range = len(IDcases_List)
for i in range (0,IDcases_List_range):
IDcases_List[i] = IDcases_List[i][:-4]
# Get submission data as required in submission file | FVC
FVCDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_FVC')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
FVCDataList = FVCDataList + [datum]
submissionFile_DataFrame['FVC'] = FVCDataList
# Get submission data as required in submission file | Confidence
CONDataList = []
for k in range(0,submissionNumber_range):
for j in IDcases_List:
# Get datum in raw data
IDlabel_rawFile = str(j)+str('_CON')
datum = rawFile_DataFrame[IDlabel_rawFile][k]
datum = round(datum,0)
# Set datum in submission file
CONDataList = CONDataList + [datum]
submissionFile_DataFrame['Confidence'] = CONDataList
# Save file | Get directory
path_destination = path_outcome+'submissions/'
try:
os.chdir(path_destination)
GetCreation = True
except FileNotFoundError:
GetCreation = False
if GetCreation == False:
from distutils.dir_util import mkpath
mkpath(path_destination)
os.chdir(path_destination)
submissionList = os.listdir(path_destination)
number = len(submissionList)
filename = 'submission_'+str(number+1)+'.csv'
submissionFile_DataFrame.to_csv(filename, index=False)
return submissionFile_DataFrame, filename, testMode
if testMode == True:
ProductType = 'population'
filename = 'submissionRawFile_2020_09_19.csv'
resultFunction2 = SubmissionBuilder(ProductType,filename,testMode)
print("=========================================")
print("Product Type:")
print("=========================================")
print(ProductType)
print("=========================================")
print("Submission File saved as:")
print("=========================================")
print(resultFunction2[1])
print("=========================================")
print("Test result Function 2: Success")
print("=========================================")
"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with a stacking solution
Raw code reference (see Tester.py): Test 15
"""
def stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType)
# Get train dataset and test dataset
import pandas as pd
filename_trainDataset = 'train.csv'
train_dataset = pd.read_csv(path_ProductType+filename_trainDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission dataset (template)
import numpy as np
path_resources = 'Y:/Kaggle_OSIC/3-Data (Prototype)/resources/'
if (PydicomMode == False):
filename_submissionDataset = 'submissionInputDataset.csv'
else:
filename_submissionDataset = 'submissionInputDataset_pydicom.csv'
submission_dataset = pd.read_csv(path_resources+filename_submissionDataset)
submission_dataset = submission_dataset.replace(np.nan,'iNaN')
# Adjust train dataset | Phase 1: Get ID list of the test dataset
IDList = list(test_dataset.Patient)
# Adjust train dataset | Phase 2: Get submission instances from train dataset
instancesPopulation = len(train_dataset.Patient)
indexList = []
for i in IDList:
for j in range(0,instancesPopulation):
if i == train_dataset.Patient[j]:
indexToInclude = train_dataset.index[j]
indexList = indexList + [indexToInclude]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | a. Remove test instances from train dataset and reset index
train_dataset_adjusted = train_dataset.drop(indexList)
train_dataset_adjusted.reset_index
# Adjust train dataset | Phase 3: Create an adjusted train dataset | b. Get Transferring data from train dataset
instanceToTrasferList_index = []
for k in range(0,instancesPopulation):
for l in IDList:
if train_dataset.Patient[k] == l:
instanceToTransfer_Index = train_dataset.index[k]
instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
train_dataset_instancesToTransfer.index
train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reset_index()
train_dataset_instancesToTransfer.drop(columns='index')
# Adjust train dataset | Phase 3: Create an adjusted train dataset | c. Update the submission dataset with the transferring data in b.
submission_dataset_range = len(submission_dataset.Patient)
train_dataset_instancesToTransfer_range = len(train_dataset_instancesToTransfer.Patient)
Patient_List = []
Week_List = []
FVC_List = []
Percent_List = []
Age_List = []
Sex_List = []
SmokingStatus_List = []
for m in range (0,submission_dataset_range):
timesCopy = 0
if(submission_dataset.Patient[m] in IDList):
referenceWeek = submission_dataset.Weeks[m]
for n in range (0,train_dataset_instancesToTransfer_range):
if(train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
if (timesCopy == 0):
submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
timesCopy = timesCopy + 1
else:
# Additional instances to include
Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]
# Adjust train dataset | Phase 3: Create an adjusted train dataset | d. Add common values to submission dataset given those from the test dataset (Features: Age, Sex, SmokingStatus)
submission_dataset_range = len(submission_dataset.Patient)
for o in range(0,submission_dataset_range):
if(submission_dataset.Patient[o] in IDList):
for p in range(0,train_dataset_instancesToTransfer_range):
if(submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
# Scenario to replace NaN values: Average FVC for a given Patient
averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].mean()
submission_dataset.FVC[o] = averageFVC
# Adjust train dataset | Phase 4: Create an adjusted train dataset | e. Concatenate the submission dataset (and additional instance) and the adjusted train dataset
additionalDictionary = {submission_dataset.columns[0]:Patient_List,
submission_dataset.columns[1]:Week_List,
submission_dataset.columns[2]:FVC_List,
submission_dataset.columns[3]:Percent_List,
submission_dataset.columns[4]:Age_List,
submission_dataset.columns[5]:Sex_List,
submission_dataset.columns[6]:SmokingStatus_List}
additional_dataset = pd.DataFrame(data=additionalDictionary)
frames = [train_dataset_adjusted,submission_dataset,additional_dataset]
train_dataset_adjusted = pd.concat(frames)
train_dataset_adjusted = train_dataset_adjusted.reset_index()
train_dataset_adjusted = train_dataset_adjusted.drop(columns='index')
    # Adjust train dataset with the pydicom train dataset | Phase 1: Get pydicom train dataset
if(PydicomMode == True):
filename_pydicom = 'train_pydicom.csv'
path_ProductType_pydicom = path_ProductType + 'outcome/'
train_dataset_pydicom = pd.read_csv(path_ProductType_pydicom + filename_pydicom)
    # Adjust train dataset with the pydicom train dataset | Phase 2: Include values from train_adjusted_pydicom.py into the adjusted train dataset
if(PydicomMode == True):
instancesToInclude_List = list(train_dataset_pydicom.Patient)
newIndex = len(train_dataset_adjusted.Patient)
for i in instancesToInclude_List:
# Get instance to transfer
InstanceToInclude_Patient = i
InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]
# Put instance into train_dataset_adjusted DataFrame
if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
# Get index
indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
# Complete instance
train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
else:
# Add new instance
## Get repeatable instances
repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
## Get Dictionary
DictionaryToInclude = {}
DictionaryToInclude['Patient'] = InstanceToInclude_Patient
DictionaryToInclude['Weeks'] = InstanceToInclude_Week
DictionaryToInclude['FVC'] = repeatableInstance1
DictionaryToInclude['Percent'] = repeatableInstance2
DictionaryToInclude['Age'] = repeatableInstance3
DictionaryToInclude['Sex'] = repeatableInstance4
DictionaryToInclude['SmokingStatus'] = repeatableInstance5
DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
## Get DataFrame
DataFrameToInclude = pd.DataFrame(data = DictionaryToInclude, index=[newIndex])
newIndex = newIndex + 1
## Concatenate DataFrame
train_dataset_adjusted = pd.concat([train_dataset_adjusted, DataFrameToInclude])
# nan filling
train_dataset_adjusted = train_dataset_adjusted.replace('iNaN',np.nan)
# Specifying dtype
train_dataset_adjusted.astype({'Patient': 'O'}).dtypes
train_dataset_adjusted.astype({'Weeks': 'float64'}).dtypes
train_dataset_adjusted.astype({'Percent': 'float64'}).dtypes
train_dataset_adjusted.astype({'Age': 'float64'}).dtypes
train_dataset_adjusted.astype({'Sex': 'O'}).dtypes
train_dataset_adjusted.astype({'SmokingStatus': 'O'}).dtypes
train_dataset_adjusted.astype({'FVC': 'float64'}).dtypes
if(PydicomMode == True):
train_dataset_adjusted.astype({'indexType1_Exhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'indexType1_Inhalation': 'float64'}).dtypes
train_dataset_adjusted.astype({'ImageType': 'O'}).dtypes
# Get CSV file
path_output = path_ProductType +'outcome/'
if(PydicomMode == False):
filename_output = 'train_adjusted.csv'
else:
filename_output = 'train_adjusted_pydicom.csv'
train_dataset_adjusted.to_csv(path_output+filename_output)
# Function Result
resultFunction = train_dataset_adjusted,path_output,filename_output
# Report Mode
if reportMode == True:
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction[0])
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction[1])
print("=========================================")
print("Input File saved as:", resultFunction[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
return resultFunction
if testMode == True:
ProductType = 'prototype'
PydicomMode = True
reportMode = False
resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
print("=========================================")
print("Function Report")
print("=========================================")
print("DataFrame")
print("=========================================")
print(resultFunction3[0])
print("=========================================")
print("=========================================")
print("Product Type: ", ProductType)
print("=========================================")
print("Pydicom Mode: ", PydicomMode)
print("=========================================")
print("Location of Input File:", resultFunction3[1])
print("=========================================")
print("Input File saved as:", resultFunction3[2])
print("=========================================")
print("Data type of the dataset")
print("=========================================")
print(resultFunction3[0].dtypes)
print("=========================================")
print("Test result Function 3: Success")
print("=========================================")
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the object of concern. The c value has been computed
with the following data-fitting scope: (1) Data: FVC predictions; (2) Probability density function: the statistical function
in scipy known as scipy.stats.loglaplace, i.e. loglaplace.pdf(x, c, loc=0, scale=1).
"""
def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode):
# Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
# Set working directory
import os
os.chdir(path_ProductType + 'outcome/')
# Get result data and test dataset
import pandas as pd
if(pydicomMode == True):
filename_resultDataset = 'result_pydicom.csv'
else:
filename_resultDataset = 'result.csv'
result_dataset = pd.read_csv(path_ProductType+'outcome/'+filename_resultDataset)
filename_testDataset = 'test.csv'
test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
# Get submission instances | Phase 1: Index
IDList = list(test_dataset.Patient)
IDList_index_dictionary = {}
for i in IDList:
itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
IDList_index_dictionary[i] = itemToInclude
# Get submission instances | Phase 2: Extract submission instances from result dataset
IDList_index = []
IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
for j in IDList: IDList_index = IDList_index + list(IDList_index_dictionary[j])
submission_dataset = result_dataset.loc[IDList_index]
# Get submission instances | Phase 3: Extract duplicated instances
submission_dataset = submission_dataset.drop_duplicates(subset=['Patient','Weeks'])
# Get submission instances | Phase 4: Sort submission instances by Weeks (ascending) and reset index
submission_dataset = submission_dataset.sort_values(by=['Weeks','Patient'])
submission_dataset = submission_dataset.reset_index()
submission_dataset = submission_dataset.drop(columns=['Unnamed: 0','index'])
# Get confidence measure | Phase 1: Get shape Parameter DataFrame by default
## When shapeParameter_DataFrame==[], parameter c = 0.126074 is assigned by default per model and ID
if (shapeParameter_DataFrame == []):
shapeParameter_dictionary = {}
shapeParameter = 0.126074
MLModelList = IDList_columns[2:]
for l in MLModelList:
keyShapeParameter = 'c Parameter_'+l
shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = IDList)
# Get confidence measure | Phase 2: Get standard-deviation-clipped per instance
## Metric - Part 1: standard_deviation_clipped = max(standard_deviation, 70)
## Build a DataFrame with standard-deviation-clipped values given an ID and a ML Model: standardDeviationClipped_DataFrame
standardDeviationClipped_DataFrame = shapeParameter_DataFrame.copy()
columnLabels = list(standardDeviationClipped_DataFrame.columns)
columnLabels_SDC_dictionary = {}
for i in columnLabels:
columnLabels_item ='SD_Clipped'+i[11:]
columnLabels_SDC_dictionary[i]=columnLabels_item
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.rename(columns=columnLabels_SDC_dictionary)
import numpy as np
standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.replace(3,np.nan)
ID_List = list(standardDeviationClipped_DataFrame.index)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
CParameter_List = list(shapeParameter_DataFrame.columns)
numy = 0
from scipy.stats import loglaplace
for j in ID_List:
for k in SDModel_List:
itemToInclude = CParameter_List[numy]
c = shapeParameter_DataFrame[itemToInclude][j]
sd_LL = loglaplace.std(c, loc=0, scale=100)
standardDeviationClipped_DataFrame[k][j] = max(70,sd_LL) # j: index is ID | k: SD_Clipped_(ML Model)
numy = numy + 1
numy = 0
# Get confidence measure | Phase 3: Get metric axe per model: |FVC_true - FVC_predicted|
## Metric - Part 1: |FVC_true - FVC_pred|
if(pydicomMode == True):
variableNumber = 10
else:
variableNumber = 7
MLModelList = list(submission_dataset.columns[variableNumber:])
metric_dictionary = {}
for j in MLModelList:
metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
metric_differential = list(metric_differential)
keyToInclude = 'metric_'+j
metric_dictionary[keyToInclude] = metric_differential
metric_DataFrame = pd.DataFrame(data=metric_dictionary)
# Get confidence measure | Phase 4: Get metric axe per model: min(|FVC_true - FVC_predicted|, 1000)
## metric per instance
## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
metricLabels = list(metric_DataFrame.columns)
instancesNumber = len(submission_dataset.index)
for i in metricLabels:
j = 0
while (j<instancesNumber):
metric_DataFrame[i][j] = min(metric_DataFrame[i][j],1000)
j = j+1
submission_dataset = submission_dataset.join(metric_DataFrame)
    # Get confidence measure | Phase 5: Get metric axe per model: (-1 * differential * 2^0.5 / SDC) - ln(2^0.5 * SDC)
    ## metric per instance
    ## differential = min(|FVC_true - FVC_predicted|, 1000)
    ## SDC: Standard Deviation Clipped
    ## Metric - Part 3: (-1 * differential * 2^0.5 / SDC) - ln(2^0.5 * SDC)
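    ## Worked example (hypothetical numbers, illustration only): with differential = 200 and SDC = 70,
    ## metric = -(2**0.5 * 200 / 70) - ln(2**0.5 * 70) = -4.0406 - 4.5951 = -8.6357 (approximately)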
IDList = list(test_dataset.Patient)
SDModel_List = list(standardDeviationClipped_DataFrame.columns)
SDModel_index_List = list(standardDeviationClipped_DataFrame.index)
metric_lists = list(metric_DataFrame.columns)
metric_index_lists = list(metric_DataFrame.index)
submission_dataset_index_List = list(submission_dataset.index)
instancesNumber = len(submission_dataset_index_List)
indexPerID_dictionary = {}
### Step 1: Get index per ID to compute
for i in IDList:
listToInclude = list(submission_dataset.Patient[submission_dataset.Patient == i].index)
indexPerID_dictionary[i] = listToInclude
indexPerID_DataFrame = pd.DataFrame(data=indexPerID_dictionary)
    ### Step 2: Compute metric
import math
from math import log1p
for k in IDList:
for i in metric_lists:
for j in list(indexPerID_DataFrame[k]):
differential = submission_dataset[i][j]
SDC_Label = 'SD_Clipped_' + i[7:]
SDC = standardDeviationClipped_DataFrame[SDC_Label][k]
metric_part1 = -1* 2**0.5 * differential / SDC
                metric_part2 = -1 * math.log(2**0.5 * SDC)  # natural log, as in the Phase 5 formula (log1p would compute ln(1 + x))
metric = metric_part1 + metric_part2
submission_dataset[i][j] = metric
# Result function specification
resultFunction = submission_dataset,shapeParameter_DataFrame,standardDeviationClipped_DataFrame
# Get submission files | Phase 1: Get submission file template
filename = 'sample_submission.csv'
submissionFile = pd.read_csv(path_ProductType+filename)
## Get submission files | Phase 2: Create directory
try:
path_output = path_ProductType + 'submission/'
os.chdir(path_output)
except FileNotFoundError:
        # Create the directory; import mkpath explicitly (importing distutils.ccompiler does not expose dir_util)
        from distutils.dir_util import mkpath
        path_output = path_ProductType + 'submission/'
        mkpath(path_output)
## Get submission files | Phase 3: Get correlative
files_list = os.listdir(path_output)
    try:
        # Compare correlatives numerically; a lexicographic max would rank '9.csv' above '10.csv'
        maxNumber = max(int(name[:-4]) for name in files_list)
        nextNumber = maxNumber + 1
    except ValueError:
        nextNumber = 0
## Get submission files | Phase 4: Get models to include and their corresponding metrics
ModelToInclude = IDList_columns[2:]
## Get submission files | Phase 5: Build Files
for i in ModelToInclude:
filename = 'sample_submission.csv'
submissionFile = pd.read_csv(path_ProductType+filename)
submissionFile_columns = list(submissionFile.columns)
fvc_array = np.array(submission_dataset[i])
confidence_array = np.array(submission_dataset['metric_'+i])
submissionFile['FVC'] = fvc_array
submissionFile['Confidence'] = confidence_array
filename_output = str(nextNumber)+'.csv'
path_output = path_ProductType +'submission/'
submissionFile.to_csv(path_output+filename_output,columns=submissionFile_columns,index=False)
nextNumber = nextNumber + 1
return resultFunction
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
example = False
if (example == True):
import pandas as pd
shapeParameter_IDList = ['ID00419637202311204720264','ID00421637202311550012437','ID00422637202311677017371','ID00423637202312137826377','ID00426637202313170790466']
c_List1 = [3,3,3,3,3]
c_List2 = [3,3,3,3,3]
c_List3 = [3,3,3,3,3]
c_List4 = [3,3,3,3,3]
shapeParameter_dictionary = {'Random Forest':c_List1, 'Lasso':c_List2, 'Gradient Boosting':c_List3, 'Stacking Regressor':c_List4}
shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = shapeParameter_IDList)
else:
shapeParameter_DataFrame = []
# Set Pydicom mode
pydicomMode = True
resultFunction4 = Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[1])
print("Standard Deviation Clipped - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction4[2])
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
"""
=========================================
Function 5: Get shape parameters given the required use of a log-laplace distribution (i.e. Laplace Log Likelihood)
=========================================
Purpose: Get shape parameter visualization for loglaplace
Raw code reference (see Tester.py): Test 17
"""
def shapeParameter_visualizer(ProductType,testMode):
import numpy as np
from scipy.stats import loglaplace
import matplotlib.pyplot as plt
fig, ax = plt.subplots(4, 5, sharex=False, sharey=False, figsize=(32, 24))
## Get IDs to test
import os
import pandas as pd
## Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
## Get probabilities from predicted values grouping by ID and Model
path = path_ProductType + 'outcome/'
filename = 'result.csv'
y_pred = pd.read_csv(path+filename)
## Get IDs to test
path = path_ProductType
filename = 'test.csv'
test_dataset = pd.read_csv(path+filename)
ID_List = list(test_dataset.Patient)
## Get models
model_List = ['Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
## Grouping task
k = 0
l = 0
for i in ID_List:
k = 0
for j in model_List:
# Data Fit task
#r = y_pred[y_pred.Patient==i][j]/sum(y_pred[y_pred.Patient==i][j])
r = y_pred[y_pred.Patient==i][j]
r = np.array(r)
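            # Fit only the shape parameter c of the log-Laplace distribution;
            # location and scale are pinned (floc=0, fscale=1), so c alone is estimated.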
c1, loc1, scale1 = loglaplace.fit(r,floc=0,fscale=1)
c = c1
# # Calculate a few first moments
# mean, var, skew, kurt = loglaplace.stats(c, moments='mvsk')
# Display the probability density function (pdf):
x = np.linspace(loglaplace.ppf(0.01, c), loglaplace.ppf(0.99, c), num=100)
ax[k,l].plot(x, loglaplace.pdf(x, c),'r-', lw=5, alpha=0.6, label='loglaplace pdf')
# Freeze the distribution and display the frozen pdf:
rv = loglaplace(c)
ax[k,l].plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Generate random numbers:
r = loglaplace.rvs(c1, loc=0, scale=1, size=1000)
# And compare the histogram:
#ax[k,l].hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax[k,l].legend(loc='best', frameon=False)
# Set limits
#ax[k,l].set_xlim(0,0.1)
#ax[k,l].set_ylim(0,4)
ax[k,l].set_xlabel('x')
ax[k,l].set_ylabel('f(x,c)')
# Check Accuracy
vals = loglaplace.ppf([0.001, 0.5, 0.999], c)
accuracy = np.allclose([0.001, 0.5, 0.999], loglaplace.cdf(vals, c))
# Returns True if two arrays are element-wise equal within a tolerance.
if(accuracy == True):
accuracy = 'Equal case'
else:
accuracy = 'Unequal case'
# Set title
title = str('Probability density function for loglaplace'+'\n'+i + '\n' + j + ' | Accuracy:'+accuracy)
ax[k,l].set_title(title)
k = k + 1
l = l + 1
plt.tight_layout()
plt.show()
resultFunction = c
return resultFunction
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
resultFunction5 = shapeParameter_visualizer(ProductType, testMode = True)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction5)
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
# """
# =========================================
# Function : Dataset builder 2 (Stacking solution case) to process with ML models
# =========================================
# Purpose: Build an input dataset to be processed with an stacking solution but including Pydicom image-processing solution
# Raw code reference (see Tester.py): 15
# """
# def stacking_Dataset_Builder_PydicomSolution(productType, testMode):
# # Set Product Type and its corresponding path
# if ProductType == 'population':
# path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# if ProductType == 'prototype':
# path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
# if ProductType == 'sampling':
# path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
| [((62, 4, 62, 25), 'os.chdir', 'os.chdir', ({(62, 13, 62, 24): 'path_source'}, {}), '(path_source)', False, 'import os\n'), ((64, 14, 64, 37), 'os.listdir', 'os.listdir', ({(64, 25, 64, 36): 'path_source'}, {}), '(path_source)', False, 'import os\n'), ((79, 27, 79, 69), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((90, 38, 90, 89), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((92, 27, 92, 112), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((121, 32, 121, 77), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((133, 4, 133, 33), 'distutils.dir_util.remove_tree', 'remove_tree', ({(133, 16, 133, 32): 'path_destination'}, {}), '(path_destination)', False, 'from distutils.dir_util import remove_tree\n'), ((134, 4, 134, 36), 'distutils.dir_util.create_tree', 'create_tree', ({(134, 16, 134, 32): 'path_destination', (134, 33, 134, 35): '[]'}, {}), '(path_destination, [])', False, 'from distutils.dir_util import create_tree\n'), ((156, 4, 156, 38), 'distutils.dir_util.remove_tree', 'remove_tree', ({(156, 16, 156, 37): 'path_destination_test'}, {}), '(path_destination_test)', False, 'from distutils.dir_util import remove_tree\n'), ((157, 4, 157, 41), 'distutils.dir_util.create_tree', 'create_tree', ({(157, 16, 157, 37): 'path_destination_test', (157, 38, 157, 40): '[]'}, {}), '(path_destination_test, [])', False, 'from distutils.dir_util import create_tree\n'), ((164, 18, 164, 46), 'os.listdir', 'os.listdir', ({(164, 29, 164, 45): 'path_source_test'}, {}), '(path_source_test)', False, 'import os\n'), ((261, 4, 261, 26), 'os.chdir', 'os.chdir', ({(261, 13, 261, 25): 'path_outcome'}, {}), '(path_outcome)', False, 'import os\n'), ((262, 24, 262, 71), 'pandas.read_csv', 'pd.read_csv', ({(262, 36, 262, 70): '"""submissionRawFile_2020_09_19.csv"""'}, {}), "('submissionRawFile_2020_09_19.csv')", True, 'import pandas as pd\n'), ((265, 4, 265, 30), 'os.chdir', 'os.chdir', ({(265, 13, 265, 29): 'path_ProductType'}, {}), '(path_ProductType)', False, 'import os\n'), ((266, 31, 266, 67), 'pandas.read_csv', 'pd.read_csv', ({(266, 43, 266, 66): '"""sample_submission.csv"""'}, {}), "('sample_submission.csv')", True, 'import pandas as pd\n'), ((326, 21, 326, 49), 'os.listdir', 'os.listdir', ({(326, 32, 326, 48): 'path_destination'}, {}), '(path_destination)', False, 'import os\n'), ((380, 4, 380, 30), 'os.chdir', 'os.chdir', ({(380, 13, 380, 29): 'path_ProductType'}, {}), '(path_ProductType)', False, 'import os\n'), ((385, 20, 385, 71), 'pandas.read_csv', 'pd.read_csv', ({(385, 32, 385, 70): 'path_ProductType + filename_trainDataset'}, {}), '(path_ProductType + filename_trainDataset)', True, 'import pandas as pd\n'), ((387, 19, 387, 69), 'pandas.read_csv', 'pd.read_csv', ({(387, 31, 387, 68): 'path_ProductType + filename_testDataset'}, {}), '(path_ProductType + filename_testDataset)', True, 'import pandas as pd\n'), ((397, 25, 397, 79), 'pandas.read_csv', 'pd.read_csv', ({(397, 37, 397, 78): 'path_resources + filename_submissionDataset'}, {}), '(path_resources + filename_submissionDataset)', True, 'import pandas as pd\n'), ((489, 25, 489, 64), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((493, 29, 493, 46), 'pandas.concat', 'pd.concat', ({(493, 39, 493, 45): 'frames'}, {}), '(frames)', True, 'import pandas as pd\n'), ((685, 4, 685, 43), 'os.chdir', 'os.chdir', ({(685, 13, 685, 42): "(path_ProductType + 'outcome/')"}, {}), "(path_ProductType + 'outcome/')", False, 
'import os\n'), ((694, 21, 694, 84), 'pandas.read_csv', 'pd.read_csv', ({(694, 33, 694, 83): "path_ProductType + 'outcome/' + filename_resultDataset"}, {}), "(path_ProductType + 'outcome/' + filename_resultDataset)", True, 'import pandas as pd\n'), ((696, 19, 696, 69), 'pandas.read_csv', 'pd.read_csv', ({(696, 31, 696, 68): 'path_ProductType + filename_testDataset'}, {}), '(path_ProductType + filename_testDataset)', True, 'import pandas as pd\n'), ((826, 27, 826, 67), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((853, 21, 853, 59), 'pandas.read_csv', 'pd.read_csv', ({(853, 33, 853, 58): 'path_ProductType + filename'}, {}), '(path_ProductType + filename)', True, 'import pandas as pd\n'), ((865, 17, 865, 40), 'os.listdir', 'os.listdir', ({(865, 28, 865, 39): 'path_output'}, {}), '(path_output)', False, 'import os\n'), ((948, 15, 948, 79), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((967, 13, 967, 39), 'pandas.read_csv', 'pd.read_csv', ({(967, 25, 967, 38): 'path + filename'}, {}), '(path + filename)', True, 'import pandas as pd\n'), ((972, 19, 972, 45), 'pandas.read_csv', 'pd.read_csv', ({(972, 31, 972, 44): 'path + filename'}, {}), '(path + filename)', True, 'import pandas as pd\n'), ((1038, 4, 1038, 22), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((1039, 4, 1039, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((72, 33, 72, 52), 'os.listdir', 'os.listdir', ({(72, 44, 72, 51): 'path_ID'}, {}), '(path_ID)', False, 'import os\n'), ((109, 23, 109, 68), 'random.randrange', 'random.randrange', ({(109, 40, 109, 41): '0', (109, 42, 109, 65): 'randomNumberTermination', (109, 66, 109, 67): '1'}, {}), '(0, randomNumberTermination, 1)', False, 'import random\n'), ((145, 8, 145, 48), 'distutils.dir_util.create_tree', 'create_tree', ({(145, 20, 145, 44): 'path_destination_unitary', (145, 45, 145, 47): '[]'}, {}), '(path_destination_unitary, [])', False, 'from distutils.dir_util import create_tree\n'), ((146, 8, 146, 63), 'distutils.dir_util.copy_tree', 'copy_tree', ({(146, 18, 146, 37): 'path_source_unitary', (146, 38, 146, 62): 'path_destination_unitary'}, {}), '(path_source_unitary, path_destination_unitary)', False, 'from distutils.dir_util import copy_tree\n'), ((170, 8, 170, 48), 'distutils.dir_util.create_tree', 'create_tree', ({(170, 20, 170, 44): 'path_destination_unitary', (170, 45, 170, 47): '[]'}, {}), '(path_destination_unitary, [])', False, 'from distutils.dir_util import create_tree\n'), ((171, 8, 171, 63), 'distutils.dir_util.copy_tree', 'copy_tree', ({(171, 18, 171, 37): 'path_source_unitary', (171, 38, 171, 62): 'path_destination_unitary'}, {}), '(path_source_unitary, path_destination_unitary)', False, 'from distutils.dir_util import copy_tree\n'), ((176, 21, 176, 33), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date\n'), ((315, 8, 315, 34), 'os.chdir', 'os.chdir', ({(315, 17, 315, 33): 'path_destination'}, {}), '(path_destination)', False, 'import os\n'), ((323, 8, 323, 32), 'distutils.dir_util.mkpath', 'mkpath', ({(323, 15, 323, 31): 'path_destination'}, {}), '(path_destination)', False, 'from distutils.dir_util import mkpath\n'), ((324, 8, 324, 34), 'os.chdir', 'os.chdir', ({(324, 17, 324, 33): 'path_destination'}, {}), '(path_destination)', False, 'import os\n'), ((502, 32, 502, 88), 'pandas.read_csv', 'pd.read_csv', ({(502, 44, 502, 
87): 'path_ProductType_pydicom + filename_pydicom'}, {}), '(path_ProductType_pydicom + filename_pydicom)', True, 'import pandas as pd\n'), ((732, 35, 732, 97), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((783, 27, 783, 63), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((858, 8, 858, 29), 'os.chdir', 'os.chdir', ({(858, 17, 858, 28): 'path_output'}, {}), '(path_output)', False, 'import os\n'), ((880, 25, 880, 63), 'pandas.read_csv', 'pd.read_csv', ({(880, 37, 880, 62): 'path_ProductType + filename'}, {}), '(path_ProductType + filename)', True, 'import pandas as pd\n'), ((883, 20, 883, 51), 'numpy.array', 'np.array', ({(883, 29, 883, 50): 'submission_dataset[i]'}, {}), '(submission_dataset[i])', True, 'import numpy as np\n'), ((884, 27, 884, 68), 'numpy.array', 'np.array', ({(884, 36, 884, 67): "submission_dataset['metric_' + i]"}, {}), "(submission_dataset['metric_' + i])", True, 'import numpy as np\n'), ((911, 35, 911, 112), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((761, 20, 761, 55), 'scipy.stats.loglaplace.std', 'loglaplace.std', (), '', False, 'from scipy.stats import loglaplace\n'), ((992, 16, 992, 27), 'numpy.array', 'np.array', ({(992, 25, 992, 26): 'r'}, {}), '(r)', True, 'import numpy as np\n'), ((994, 31, 994, 64), 'scipy.stats.loglaplace.fit', 'loglaplace.fit', (), '', False, 'from scipy.stats import loglaplace\n'), ((1005, 17, 1005, 30), 'scipy.stats.loglaplace', 'loglaplace', ({(1005, 28, 1005, 29): 'c'}, {}), '(c)', False, 'from scipy.stats import loglaplace\n'), ((1009, 16, 1009, 61), 'scipy.stats.loglaplace.rvs', 'loglaplace.rvs', (), '', False, 'from scipy.stats import loglaplace\n'), ((1022, 19, 1022, 57), 'scipy.stats.loglaplace.ppf', 'loglaplace.ppf', ({(1022, 34, 1022, 53): '[0.001, 0.5, 0.999]', (1022, 55, 1022, 56): 'c'}, {}), '([0.001, 0.5, 0.999], c)', False, 'from scipy.stats import loglaplace\n'), ((562, 37, 562, 95), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((566, 41, 566, 96), 'pandas.concat', 'pd.concat', ({(566, 51, 566, 95): '[train_dataset_adjusted, DataFrameToInclude]'}, {}), '([train_dataset_adjusted, DataFrameToInclude])', True, 'import pandas as pd\n'), ((1001, 28, 1001, 51), 'scipy.stats.loglaplace.ppf', 'loglaplace.ppf', ({(1001, 43, 1001, 47): '0.01', (1001, 49, 1001, 50): 'c'}, {}), '(0.01, c)', False, 'from scipy.stats import loglaplace\n'), ((1001, 53, 1001, 76), 'scipy.stats.loglaplace.ppf', 'loglaplace.ppf', ({(1001, 68, 1001, 72): '0.99', (1001, 74, 1001, 75): 'c'}, {}), '(0.99, c)', False, 'from scipy.stats import loglaplace\n'), ((1002, 28, 1002, 48), 'scipy.stats.loglaplace.pdf', 'loglaplace.pdf', ({(1002, 43, 1002, 44): 'x', (1002, 46, 1002, 47): 'c'}, {}), '(x, c)', False, 'from scipy.stats import loglaplace\n'), ((1023, 56, 1023, 79), 'scipy.stats.loglaplace.cdf', 'loglaplace.cdf', ({(1023, 71, 1023, 75): 'vals', (1023, 77, 1023, 78): 'c'}, {}), '(vals, c)', False, 'from scipy.stats import loglaplace\n'), ((842, 36, 842, 60), 'math.log1p', 'math.log1p', ({(842, 47, 842, 59): '(2 ** 0.5 * SDC)'}, {}), '(2 ** 0.5 * SDC)', False, 'import math\n')] |
ahmed-shariff/scraper | paperscraper/scrapers/keywords.py | 52bed967db7e08e438daaa8dfa8d9338567ad7c2 | import re
regex = re.compile(r'[\n\r\t]')
def acm_digital_library(soup):
try:
keywords = set()
keywords_parent_ol = soup.find('ol', class_="rlist organizational-chart")
keywords_divs = keywords_parent_ol.findChildren('div', recursive=True)
for kw_parent in keywords_divs:
kw = kw_parent.text
keywords.add(regex.sub("", kw.split(",")[0]))
return list(keywords)
except Exception as e:
print(e)
return None
def graphics_interface_proceedings(soup):
return None
def ieee_explore(soup):
try:
keywords = set()
ggp_ul = soup.find('ul', class_="doc-keywords-list stats-keywords-list")
gp_li = ggp_ul.findChildren("li", class_="doc-keywords-list-item", recursive=False)
for p_li in gp_li:
if p_li.find('strong').text in ["IEEE Keywords", "INSPEC: Controlled Indexing", "INSPEC: Non-Controlled Indexing", "MeSH Terms"]:
for keywords_l in p_li.find('ul').findChildren("li", recursive=False):
a_tag = keywords_l.find("a", class_="stats-keywords-list-item")
if a_tag is not None:
keywords.add(str(regex.sub("", a_tag.text.split(",")[0])))
else:
keywords.add(str(regex.sub("", str(keywords_l.text).split(",")[0])))
return list(keywords)
except Exception as e:
print(e)
return None
def eurographics_digital_library(soup):
try:
keywords_set = set()
p_tablebody = soup.find('table', class_="detailtable").find("tbody")
p_trs = p_tablebody.findChildren('tr')
for tr in p_trs:
label = tr.find("td", class_="label-cell")
if label.text == "dc.subject":
keywords = tr.find("td", class_="word-break")
# e.g. CASE 1: ['Categories and Subject Descriptors (according to ACM CCS): I.4.1 [Image Processing and Computer Vision]: Enhancement-Filtering I.3.3 [Computer Graphics]: Picture/Image Generation-Bitmap and framebuffer operations']
# e.g. CASE 2 [TODO: Not taken care of yet] Categories and Subject Descriptors (according to ACM CCS): Information Interfaces And Presentation (e.g., HCI) [H.5.2]: User Interfaces-Graphical user interfaces (GUI)
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords.text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
def springer_v2(soup):
try:
keywords = set()
keywords_parent_div = soup.find('div', class_="KeywordGroup")
keywords_span = keywords_parent_div.findChildren("span", class_="Keyword")
for k in keywords_span:
keywords.add(k.text)
return list(keywords)
except Exception as e:
print(e)
return None
def dagstuhl(soup):
try:
keywords_label = soup.find('b', text="Keywords:")
keywords_parent_font = keywords_label.parent
keywords_parent_td = keywords_parent_font.parent
keywords_font = keywords_parent_td.find_next('td').find_next('td').find("font")
if keywords_font is not None:
return re.split(',', keywords_font.text)
except Exception as e:
print(e)
return None
def springer_v1(soup):
try:
keywords = set()
keywords_parent_section = soup.find('ul', class_="c-article-subject-list")
keywords_li = keywords_parent_section.findChildren("li", class_="c-article-subject-list__subject")
for k in keywords_li:
kw = k.find("span").text
keywords.add(str(regex.sub("", kw)).strip())
return list(keywords)
except Exception as e:
print(e)
return None
def wiley_online_library(soup):
try:
keywords_parent_section = soup.find('section', class_="keywords")
keywords_ul = keywords_parent_section.find('ul')
keywords_lis = keywords_ul.findChildren("li")
keywords_set = set()
for keywords_li in keywords_lis:
# e.g. Case 1: "[3.1.1] Human-Centered Computing" and so on
# e.g. Case 2: CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords_li.find("a").text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
# CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
regex_find = r'•(.*)→(.*)'
regex_replace = r'\1;\2' # set the delimiter to either , : ; (as is used below to split)
keywords_str = re.sub(regex_find, regex_replace, keywords_str)
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
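# Illustrative helper, not part of the original scraper: shows how the two transforms
# used above behave on made-up keyword strings (the sample inputs are assumptions).
def _demo_keyword_parsing():
    # Bracketed ACM categories -> list of category names
    sample = "I.4.1 [Image Processing and Computer Vision]: Enhancement I.3.3 [Computer Graphics]: Picture/Image Generation"
    assert re.findall(r'\[(.*?)\]', sample) == ["Image Processing and Computer Vision", "Computer Graphics"]
    # CCS concepts written as bullet/arrow pairs -> delimiter-separated parts
    ccs = "• Human-centered computing → Graph drawings"
    parts = [p.strip() for p in re.split(',|:|;', re.sub(r'•(.*)→(.*)', r'\1;\2', ccs))]
    assert parts == ["Human-centered computing", "Graph drawings"]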
def cogsci(soup):
return None
def scitepress(soup):
try:
keywords_set = set()
keywords_span = soup.find('span', id="ContentPlaceHolder1_LinkPaperPage_LinkPaperContent_LabelPublicationDetailKeywords")
for kw in keywords_span.text.split(","):
keywords_set.add(kw)
return list(keywords_set)
except Exception as e:
print(e)
return None
def scienceopen(soup):
try:
keywords_set = set()
for span_label in soup.find_all('span', class_="so-metadata-label"):
if "Keywords" in span_label.text:
for keyword_a in span_label.find_next_siblings('a'):
keywords_set.add(keyword_a.text)
return list(keywords_set)
except Exception as e:
pass
return None
def aaai(soup):
return None
def get_keywords(publisher, soup):
keywords_list = None
if publisher == "acm_digital_library":
keywords_list = acm_digital_library(soup)
elif publisher == "graphics_interface_proceedings":
keywords_list = graphics_interface_proceedings(soup)
elif publisher == "ieee_explore":
keywords_list = ieee_explore(soup)
elif publisher == "cogsci":
keywords_list = cogsci(soup)
elif publisher == "springer_v1":
keywords_list = springer_v1(soup)
elif publisher == "springer_v2":
keywords_list = springer_v2(soup)
elif publisher == "scitepress":
keywords_list = scitepress(soup)
elif publisher == "scienceopen":
keywords_list = scienceopen(soup)
elif publisher == "eurographics_digital_library":
keywords_list = eurographics_digital_library(soup)
elif publisher == "wiley_online_library":
keywords_list = wiley_online_library(soup)
elif publisher == "dagstuhl":
keywords_list = dagstuhl(soup)
elif publisher == "aaai":
keywords_list = aaai(soup)
    # Several scrapers return None instead of a list, so guard before calling len().
    return keywords_list if keywords_list else None
| [((3, 8, 3, 31), 're.compile', 're.compile', ({(3, 19, 3, 30): '"""[\\\\n\\\\r\\\\t]"""'}, {}), "('[\\\\n\\\\r\\\\t]')", False, 'import re\n'), ((92, 19, 92, 52), 're.split', 're.split', ({(92, 28, 92, 31): '""","""', (92, 33, 92, 51): 'keywords_font.text'}, {}), "(',', keywords_font.text)", False, 'import re\n'), ((129, 33, 129, 71), 're.findall', 're.findall', ({(129, 44, 129, 56): '"""\\\\[(.*?)\\\\]"""', (129, 58, 129, 70): 'keywords_str'}, {}), "('\\\\[(.*?)\\\\]', keywords_str)", False, 'import re\n'), ((61, 37, 61, 75), 're.findall', 're.findall', ({(61, 48, 61, 60): '"""\\\\[(.*?)\\\\]"""', (61, 62, 61, 74): 'keywords_str'}, {}), "('\\\\[(.*?)\\\\]', keywords_str)", False, 'import re\n'), ((136, 31, 136, 78), 're.sub', 're.sub', ({(136, 38, 136, 48): 'regex_find', (136, 50, 136, 63): 'regex_replace', (136, 65, 136, 77): 'keywords_str'}, {}), '(regex_find, regex_replace, keywords_str)', False, 'import re\n'), ((137, 36, 137, 67), 're.split', 're.split', ({(137, 45, 137, 52): '""",|:|;"""', (137, 54, 137, 66): 'keywords_str'}, {}), "(',|:|;', keywords_str)", False, 'import re\n'), ((65, 40, 65, 71), 're.split', 're.split', ({(65, 49, 65, 56): '""",|:|;"""', (65, 58, 65, 70): 'keywords_str'}, {}), "(',|:|;', keywords_str)", False, 'import re\n')] |
ContactEngineering/TopoBank | topobank/publication/models.py | 12710c24cc158801db20f030c3e0638060e24a0e | from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.conf import settings
MAX_LEN_AUTHORS_FIELD = 512
CITATION_FORMAT_FLAVORS = ['html', 'ris', 'bibtex', 'biblatex']
DEFAULT_KEYWORDS = ['surface', 'topography']
class UnknownCitationFormat(Exception):
def __init__(self, flavor):
self._flavor = flavor
def __str__(self):
return f"Unknown citation format flavor '{self._flavor}'."
class Publication(models.Model):
LICENSE_CHOICES = [(k, settings.CC_LICENSE_INFOS[k]['option_name'])
for k in ['cc0-1.0', 'ccby-4.0', 'ccbysa-4.0']]
short_url = models.CharField(max_length=10, unique=True, null=True)
surface = models.OneToOneField("manager.Surface", on_delete=models.PROTECT, related_name='publication')
original_surface = models.ForeignKey("manager.Surface", on_delete=models.SET_NULL,
null=True, related_name='derived_publications')
publisher = models.ForeignKey("users.User", on_delete=models.PROTECT)
publisher_orcid_id = models.CharField(max_length=19, default='') # 16 digits including 3 dashes
version = models.PositiveIntegerField(default=1)
datetime = models.DateTimeField(auto_now_add=True)
license = models.CharField(max_length=12, choices=LICENSE_CHOICES, blank=False, default='')
authors = models.CharField(max_length=MAX_LEN_AUTHORS_FIELD)
container = models.FileField(max_length=50, default='')
def get_absolute_url(self):
return reverse('publication:go', args=[self.short_url])
def get_full_url(self, request):
return request.build_absolute_uri(self.get_absolute_url())
def get_citation(self, flavor, request):
if flavor not in CITATION_FORMAT_FLAVORS:
raise UnknownCitationFormat(flavor)
method_name = '_get_citation_as_'+flavor
return getattr(self, method_name)(request)
def _get_citation_as_html(self, request):
s = '{authors}. ({year}). contact.engineering. <em>{surface.name} (Version {version})</em>.'
s += ' <a href="{publication_url}">{publication_url}</a>'
s = s.format(
authors=self.authors,
year=self.datetime.year,
version=self.version,
surface=self.surface,
publication_url=self.get_full_url(request),
)
return mark_safe(s)
def _get_citation_as_ris(self, request):
# see http://refdb.sourceforge.net/manual-0.9.6/sect1-ris-format.html
# or https://en.wikipedia.org/wiki/RIS_(file_format)
# or https://web.archive.org/web/20120526103719/http://refman.com/support/risformat_intro.asp
# https://web.archive.org/web/20120717122530/http://refman.com/support/direct%20export.zip
s = ""
def add(key, value):
nonlocal s
s += f"{key} - {value}\n"
# Electronic citation / Website
add('TY', 'ELEC')
# Title
add('TI', f"{self.surface.name} (Version {self.version})")
# Authors
for author in self.authors.split(','):
add('AU', author.strip())
# Publication Year
add('PY', format(self.datetime, '%Y/%m/%d/'))
# URL
add('UR', self.get_full_url(request))
# Name of Database
add('DB', 'contact.engineering')
# Notes
add('N1', self.surface.description)
# add keywords, defaults ones and tags
for kw in DEFAULT_KEYWORDS:
add('KW', kw)
for t in self.surface.tags.all():
add('KW', t.name)
# End of record, must be empty and last tag
add('ER', '')
return s.strip()
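    # Shape of the RIS record produced above, for a hypothetical publication
    # ("Demo Surface", version 1, authors "Ada Lovelace, Alan Turing"; URL made up):
    #   TY - ELEC
    #   TI - Demo Surface (Version 1)
    #   AU - Ada Lovelace
    #   AU - Alan Turing
    #   PY - 2021/01/01/
    #   UR - https://contact.engineering/go/abc123
    #   DB - contact.engineering
    #   N1 - <surface description>
    #   KW - surface
    #   KW - topography
    #   KW - <surface tags, if any>
    #   ER -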
def _get_citation_as_bibtex(self, request):
title = f"{self.surface.name} (Version {self.version})"
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@misc{{
{shortname},
title = {{{title}}},
author = {{{author}}},
year = {{{year}}},
note = {{{note}}},
keywords = {{{keywords}}},
howpublished = {{{publication_url}}},
}}
""".format(title=title,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
note=self.surface.description,
publication_url=self.get_full_url(request),
keywords=keywords,
shortname=shortname,
)
return s.strip()
def _get_citation_as_biblatex(self, request):
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@online{{
{shortname},
title = {{{title}}},
version = {{{version}}},
author = {{{author}}},
year = {{{year}}},
month = {{{month}}},
date = {{{date}}},
note = {{{note}}},
keywords = {{{keywords}}},
url = {{{url}}},
urldate = {{{urldate}}}
}}
""".format(title=self.surface.name,
version=self.version,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
month=self.datetime.month,
date=format(self.datetime, "%Y-%m-%d"),
note=self.surface.description,
url=self.get_full_url(request),
urldate=format(timezone.now(), "%Y-%m-%d"),
keywords=keywords,
shortname=shortname,
)
return s.strip()
@property
def storage_prefix(self):
"""Return prefix used for storage.
https://docs.djangoproject.com/en/2.2/ref/models/fields/#django.db.models.FileField.upload_to
Looks like a relative path to a directory.
If storage is on filesystem, the prefix should correspond
to a real directory.
"""
return "publications/{}/".format(self.short_url)
@property
def container_storage_path(self):
"""Return relative path of container in storage."""
return f"{self.storage_prefix}container.zip"
| [((26, 16, 26, 71), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((27, 14, 27, 107), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import models\n'), ((28, 23, 29, 88), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((30, 16, 30, 73), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((31, 25, 31, 68), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((32, 14, 32, 52), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import models\n'), ((33, 15, 33, 54), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((34, 14, 34, 95), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((35, 14, 35, 64), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((36, 16, 36, 59), 'django.db.models.FileField', 'models.FileField', (), '', False, 'from django.db import models\n'), ((39, 15, 39, 63), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((60, 15, 60, 27), 'django.utils.safestring.mark_safe', 'mark_safe', ({(60, 25, 60, 26): 's'}, {}), '(s)', False, 'from django.utils.safestring import mark_safe\n'), ((159, 34, 159, 48), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n')] |
rayhu-osu/vcube | vendor/migrations/0003_store_password.py | ff1af048adb8a9f1007368150a78b309b4d821af | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 19:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendor', '0002_store_image'),
]
operations = [
migrations.AddField(
model_name='store',
name='password',
field=models.CharField(default=1, max_length=30),
preserve_default=False,
),
]
| [((18, 18, 18, 60), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
widnerlr/isat252 | lec2.py | 4196a8b1c6f4c75c3f5d8f64164014103b695077 | """
Your module description
"""
"""
this is my second py code
for my second lecture
"""
#print ('hello world') # this is a single line commment
# this is my second line comment
#print(type("123."))
#print ("Hello World".upper())
#print("Hello World".lower())
#print("hello" + "world" + ".")
#print(2**3)
#my_str = "hello world"
#print(my_str)
#my_str = "Tom"
#print(my_str)
my_int = 2
my_float = 3.0
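# int + float is promoted to float, so this prints 5.0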
print(my_int + my_float) | [] |
Gyaha/AOC2020 | day_22_b.py | fbabae9acd7d274b84bc0c64f2665dfba9f008ca | def play_recursively_combat(p1: list, p2: list) -> bool:
rounds = set()
winner = None
while len(p1) > 0 and len(p2) > 0:
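        # Loop-prevention rule (AoC 2020 day 22, part 2): if this exact pair of decks
        # has already been seen in this sub-game, player 1 wins it immediately.
        # Both decks are flattened into one tuple with -1 as a separator.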
r = tuple(p1 + [-1] + p2)
if r in rounds:
return True
else:
rounds.add(r)
c1 = p1.pop(0)
c2 = p2.pop(0)
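        # Recurse into a sub-game only when both players still hold at least as many
        # cards as the value they just drew; otherwise the higher card wins the round.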
if c1 <= len(p1) and c2 <= len(p2):
winner = play_recursively_combat(p1[:c1], p2[:c2])
else:
winner = c1 > c2
if winner:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
return winner
def play_combat(s: str):
p1, p2 = s.strip().split("\n\n")
p1, p2 = convert_cards(p1), convert_cards(p2)
winner = play_recursively_combat(p1, p2)
w = p1 if winner else p2
s = 0
for i, c in enumerate(reversed(w), 1):
s += c * i
return s
def convert_cards(s: str) -> list:
c = []
for p in s.splitlines()[1:]:
c.append(int(p))
return c
def run_tests():
test_input = """Player 1:
9
2
6
3
1
Player 2:
5
8
4
7
10"""
test_output = 291
assert play_combat(test_input) == test_output
test_input = """Player 1:
43
19
Player 2:
2
29
14"""
assert play_combat(test_input)
def run() -> int:
with open("inputs/input_22.txt") as file:
data = file.read()
return play_combat(data)
if __name__ == "__main__":
run_tests()
import time
time_start = time.perf_counter()
print(run())
time_end = time.perf_counter() - time_start
print(f"Time: {time_end:0.4f} sec")
| [((86, 17, 86, 36), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n'), ((88, 15, 88, 34), 'time.perf_counter', 'time.perf_counter', ({}, {}), '()', False, 'import time\n')] |
sundayz/idewave-core | Auth/Constants/LoginOpCode.py | 5bdb88892173c9c3e8c85f431cf9b5dbd9f23941 | from enum import Enum
class LoginOpCode(Enum):
''' Opcodes during login process '''
LOGIN_CHALL = 0x00
LOGIN_PROOF = 0x01
RECON_CHALL = 0x02 # currently do not in use
RECON_PROOF = 0x03 # currently do not in use
REALMLIST = 0x10
class LoginResult(Enum):
''' Error codes '''
SUCCESS = 0x00
| [] |
pratannaimjoi/tokenIpad | LINETOKEN/__init__.py | f03969c05427bc1804d05c42823a28725c7e38a0 | # -*- coding: utf-8 -*-
from .LineApi import LINE
from .lib.Gen.ttypes import *
| [] |
seton-develops/PDF-Camelot-Folder-Executable | main.py | 168b5c24afe8884cf121a4207d7d3cb3ee7cc626 | '''
Created on Jun 17, 2021
@author: Sean
'''
import PDF2CSV_GUI
def main():
j = PDF2CSV_GUI.Convert_GUI()
if __name__ == "__main__":
main() | [((9, 8, 9, 33), 'PDF2CSV_GUI.Convert_GUI', 'PDF2CSV_GUI.Convert_GUI', ({}, {}), '()', False, 'import PDF2CSV_GUI\n')] |
Mildlyoffbeat/RedditBot-1 | Part1/bot_read.py | f65c3c4d0f3d6d3a468069d4a009b44a20e33797 | #!/usr/bin/python
import praw
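# 'mob-secondbot' refers to a site section in praw.ini, which must supply the
# client_id, client_secret and user_agent used to authenticate this script.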
reddit = praw.Reddit('mob-secondbot')
subreddit = reddit.subreddit("learnpython")
for submission in subreddit.hot(limit=5):
print("Title: ", submission.title)
print("Text: ", submission.selftext)
print("Score: ", submission.score)
print("---------------------------------\n")
| [((4, 9, 4, 37), 'praw.Reddit', 'praw.Reddit', ({(4, 21, 4, 36): '"""mob-secondbot"""'}, {}), "('mob-secondbot')", False, 'import praw\n')] |
Siketyan/Programming-I | 17/kazuate_liar.cpp.py | 0749c1ae045d53cd8a67f0de7ab13c26030ddd74 | from subprocess import Popen, PIPE, call
name = "kazuate_liar.o"
src = """
#include <iostream>
#include <random>
using namespace std;
int main()
{
random_device rd;
mt19937 mt(rd());
uniform_int_distribution<int> randfive(0, 4);
uniform_int_distribution<int> randint(1, 100);
int count = 0;
int num = randint(mt);
while (1)
{
int i;
cout << "数を当ててみて ";
cin >> i;
if (i < 1 || i > 100)
{
cout << "不正な入力です。" << endl;
continue;
}
count++;
bool liar = randfive(mt) == 0;
if (i == num)
{
cout << "正解です。おめでとう。 (" << count << " 回目)" << endl;
break;
}
else if ((liar && i > num) || i < num)
{
cout << "もっと大きいよ。" << endl;
}
else
{
cout << "もっと小さいよ。" << endl;
}
}
return 0;
}
""";
proc = Popen(["g++", "-o", name, "-x", "c++", "-"], stdin=PIPE)
proc.communicate(src.encode())
call(["./" + name])
| [((56, 7, 56, 65), 'subprocess.Popen', 'Popen', (), '', False, 'from subprocess import Popen, PIPE, call\n'), ((58, 0, 58, 19), 'subprocess.call', 'call', ({(58, 5, 58, 18): "['./' + name]"}, {}), "(['./' + name])", False, 'from subprocess import Popen, PIPE, call\n')] |
silvercar/terrafort | src/terrafort/main.py | bdf9cb5d7f58d10a0c295c01b3a5620fdcc2876c | """
Terrafort
Generate terraform templates for specific resources
"""
import click
from .providers.aws import Aws
@click.group()
@click.option('--commands',
is_flag=True,
help="Output import commands instead of a terraform template")
@click.version_option()
@click.pass_context
def cli(ctx, commands=False):
ctx.obj = {'commands': commands}
cli.add_command(Aws.aws_db_instance)
cli.add_command(Aws.aws_iam_instance_profile)
cli.add_command(Aws.aws_instance)
cli.add_command(Aws.aws_security_group)
if __name__ == "__main__":
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
cli(obj={})
| [((11, 1, 11, 14), 'click.group', 'click.group', ({}, {}), '()', False, 'import click\n'), ((12, 1, 14, 76), 'click.option', 'click.option', (), '', False, 'import click\n'), ((15, 1, 15, 23), 'click.version_option', 'click.version_option', ({}, {}), '()', False, 'import click\n')] |
jnsougata/rich-embed | src/ping.py | 95901e590f00c4e4eabeb99c8f06bb5f90718d80 | import discord
import app_util
class Ping(app_util.Cog):
def __init__(self, bot: app_util.Bot):
self.bot = bot
@app_util.Cog.command(
command=app_util.SlashCommand(
name='ping', description='shows avg ping of client'
),
guild_id=877399405056102431
)
async def command(self, ctx: app_util.Context):
await ctx.send_response(embed=discord.Embed(title=f'{self.bot.latency * 1000:.2f}ms'))
def setup(bot: app_util.Bot):
bot.add_application_cog(Ping(bot))
| [((11, 16, 13, 9), 'app_util.SlashCommand', 'app_util.SlashCommand', (), '', False, 'import app_util\n'), ((17, 38, 17, 93), 'discord.Embed', 'discord.Embed', (), '', False, 'import discord\n')] |
AlbertVeli/AdventOfCode | 2020/24/visualization.py | 3d3473695318a0686fac720a1a21dd3629f09e33 | #!/usr/bin/env python3
import sys
import re
import numpy as np
from PIL import Image
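# Each hex tile is addressed by an integer (x, y) pair: east/west steps change x by 2,
# the four diagonal steps change x by 1 and y by 2, so every tile centre is unique.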
moves = { 'e': (2, 0), 'se': (1, 2), 'sw': (-1, 2), 'w': (-2, 0), 'nw': (-1, -2), 'ne': (1, -2) }
# Save (x, y): True/False in tiles. True = black, False = white.
tiles = {}
for line in open(sys.argv[1]).read().splitlines():
pos = np.array((0, 0))
for d in re.findall(r'e|se|sw|w|nw|ne', line):
pos += moves[d]
t = tuple(pos)
if t in tiles:
tiles[t] = not tiles[t]
else:
tiles[t] = True
# Part 1
print('black:', sum(val == True for val in tiles.values()))
# -- Part 2 --
# take a chance on how wide it needs to be
width = 300
heigth = 300
board = np.zeros(width * heigth, dtype=np.int8)
board = board.reshape(heigth, width)
# Fill in tiles, move to center
for key, value in tiles.items():
x, y = key
x += width // 2
y += heigth // 2
board[y][x] = value
def black_neighbours(y, x, b):
num = 0
for m in moves.values():
num += b[(y + m[1], x + m[0])]
return num
def game():
board_copy = np.copy(board)
w, h = board.shape
# Don't do outer edge (to avoid special cases)
for y in range(2, h - 2):
for x in range(2, w - 2):
tile = board_copy[(y, x)]
n = black_neighbours(y, x, board_copy)
if tile:
# black
if n == 0 or n > 2:
board[(y, x)] = False
else:
# white
if n == 2:
board[(y, x)] = True
def save_image(day):
colours = [(0, 0, 0), (255, 255, 255)]
im = Image.new('RGB', (width, heigth))
for y in range(heigth):
for x in range(width):
c = colours[board[y][x]]
im.putpixel((x, y), c)
im.save('img%03d.png' % (day))
save_image(0)
for day in range(1, 101):
game()
save_image(day)
print('Day %d: %d' % (day, len(np.where(board == True)[0])))
ys, xs = np.where(board)
print(min(ys), max(ys), min(xs), max(xs))
| [((31, 8, 31, 47), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((79, 9, 79, 24), 'numpy.where', 'np.where', ({(79, 18, 79, 23): 'board'}, {}), '(board)', True, 'import numpy as np\n'), ((14, 10, 14, 26), 'numpy.array', 'np.array', ({(14, 19, 14, 25): '(0, 0)'}, {}), '((0, 0))', True, 'import numpy as np\n'), ((15, 13, 15, 49), 're.findall', 're.findall', ({(15, 24, 15, 42): '"""e|se|sw|w|nw|ne"""', (15, 44, 15, 48): 'line'}, {}), "('e|se|sw|w|nw|ne', line)", False, 'import re\n'), ((48, 17, 48, 31), 'numpy.copy', 'np.copy', ({(48, 25, 48, 30): 'board'}, {}), '(board)', True, 'import numpy as np\n'), ((66, 9, 66, 42), 'PIL.Image.new', 'Image.new', ({(66, 19, 66, 24): '"""RGB"""', (66, 26, 66, 41): '(width, heigth)'}, {}), "('RGB', (width, heigth))", False, 'from PIL import Image\n'), ((77, 35, 77, 58), 'numpy.where', 'np.where', ({(77, 44, 77, 57): '(board == True)'}, {}), '(board == True)', True, 'import numpy as np\n')] |
BearerPipelineTest/catapult | experimental/tracing/bin/diff_heap_profiler.py | 3800a67cd916200046a50748893bbd0dcf3d7f4a | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import gzip
import json
import os
import shutil
import six
from six.moves import zip
_OUTPUT_DIR = 'output'
_OUTPUT_GRAPH_DIR = os.path.join(_OUTPUT_DIR, 'graph')
class Process(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.types = {}
self.strings = {}
self.stackframes = {}
self.allocators = None
self.version = None
class Entry(object):
def __init__(self):
self.count = None
self.size = None
self.type = None
self.stackframe = None
class GraphDump(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.heap = None
self.root = ''
self.leaks = ''
self.leak_stackframes = 0
self.leak_objects = 0
def OpenTraceFile(file_path, mode):
if file_path.endswith('.gz'):
return gzip.open(file_path, mode + 'b')
return open(file_path, mode + 't')
def FindMemoryDumps(filename):
processes = {}
with OpenTraceFile(filename, 'r') as f:
data = json.loads(f.read().decode('utf-8'))
for event in data['traceEvents']:
pid = event['pid']
if pid not in processes:
processes[pid] = Process()
processes[pid].pid = pid
process = processes[pid]
# Retrieve process informations.
if event['ph'] == 'M':
if event['name'] == 'process_name' and 'name' in event['args']:
process.name = event['args']['name']
if event['name'] == 'process_labels' and 'labels' in event['args']:
process.labels = event['args']['labels']
if event['name'] == 'typeNames':
process.types = {}
for type_id, t in six.iteritems(event['args']['typeNames']):
process.types[int(type_id)] = t
if event['name'] == 'stackFrames':
process.stackframes = {}
for stack_id, s in six.iteritems(event['args']['stackFrames']):
new_stackframe = {}
new_stackframe['name'] = s['name']
if 'parent' in s:
new_stackframe['parent'] = int(s['parent'])
process.stackframes[int(stack_id)] = new_stackframe
# Look for a detailed memory dump event.
if not ((event['name'] == 'periodic_interval' or
event['name'] == 'explicitly_triggered') and
event['args']['dumps']['level_of_detail'] == 'detailed'):
continue
# Check for a memory dump V1.
if u'heaps' in event['args']['dumps']:
# Get the first memory dump.
if not process.allocators:
process.version = 1
process.allocators = event['args']['dumps']['heaps']
# Check for a memory dump V2.
# See format: [chromium] src/base/trace_event/heap_profiler_event_writer.h
if u'heaps_v2' in event['args']['dumps']:
# Memory dump format V2 is dumping information incrementally. Update
# the cumulated indexes.
maps = event['args']['dumps']['heaps_v2']['maps']
for string in maps['strings']:
process.strings[string['id']] = string['string']
for node in maps['nodes']:
node_v1 = {}
node_v1['name'] = process.strings[node['name_sid']]
if 'parent' in node:
node_v1['parent'] = node['parent']
process.stackframes[node['id']] = node_v1
for t in maps['types']:
process.types[t['id']] = process.strings[t['name_sid']]
# Get the first memory dump.
if not process.allocators:
dump = event['args']['dumps']
process.version = 2
process.allocators = dump['heaps_v2']['allocators']
# Remove processes with incomplete memory dump.
for pid, process in processes.items():
if not (process.allocators and process.stackframes and process.types):
del processes[pid]
return processes
def ResolveMemoryDumpFields(entries, stackframes, types):
def ResolveStackTrace(stack_id, stackframes):
stackframe = stackframes[stack_id]
tail = ()
if 'parent' in stackframe:
tail = ResolveStackTrace(stackframe['parent'], stackframes)
name = stackframe['name'].replace('\r', '').replace('\n', '')
return (name,) + tail
def ResolveType(type_id, types):
return types[type_id]
for entry in entries:
# Stackframe may be -1 (18446744073709551615L) when not stackframe are
# available.
if entry.stackframe not in stackframes:
entry.stackframe = []
else:
entry.stackframe = ResolveStackTrace(entry.stackframe, stackframes)
entry.type = ResolveType(entry.type, types)
def IncrementHeapEntry(stack, count, size, typename, root):
if not stack:
root['count'] += count
root['size'] += size
if typename not in root['count_by_type']:
root['count_by_type'][typename] = 0
root['count_by_type'][typename] += count
else:
top = stack[-1]
tail = stack[:-1]
if top not in root['children']:
new_node = {}
new_node['count'] = 0
new_node['size'] = 0
new_node['children'] = {}
new_node['count_by_type'] = {}
root['children'][top] = new_node
IncrementHeapEntry(tail, count, size, typename, root['children'][top])
def CanonicalHeapEntries(root):
total_count = 0
total_size = 0
for child in six.itervalues(root['children']):
total_count += child['count']
total_size += child['size']
root['count'] -= total_count
root['size'] -= total_size
for typename in root['count_by_type']:
total_count_for_type = 0
for child in six.itervalues(root['children']):
if typename in child['count_by_type']:
total_count_for_type += child['count_by_type'][typename]
root['count_by_type'][typename] -= total_count_for_type
for child in six.itervalues(root['children']):
CanonicalHeapEntries(child)
def FindLeaks(root, stack, leaks, threshold, size_threshold):
for frame in root['children']:
FindLeaks(root['children'][frame], [frame] + stack, leaks, threshold,
size_threshold)
if root['count'] > threshold and root['size'] > size_threshold:
leaks.append({'count': root['count'],
'size': root['size'],
'count_by_type': root['count_by_type'],
'stackframes': stack})
def DumpTree(root, frame, output, threshold, size_threshold):
output.write('\n{ \"name\": \"%s\",' % frame)
if root['count'] > threshold and root['count'] > size_threshold:
output.write(' \"size\": \"%s\",' % root['size'])
output.write(' \"count\": \"%s\",' % root['count'])
output.write(' \"children\": [')
is_first = True
for child_frame, child in root['children'].items():
if is_first:
is_first = False
else:
output.write(',')
DumpTree(child, child_frame, output, threshold, size_threshold)
output.write(']')
output.write('}')
def GetEntries(heap, process):
"""
Returns all entries in a heap, after filtering out unknown entries, and doing
some post processing to extract the relevant fields.
"""
if not process:
return []
entries = []
if process.version == 1:
for raw_entry in process.allocators[heap]['entries']:
# Cumulative sizes and types are skipped. see:
# https://chromium.googlesource.com/chromium/src/+/a990af190304be5bf38b120799c594df5a293518/base/trace_event/heap_profiler_heap_dump_writer.cc#294
if 'type' not in raw_entry or not raw_entry['bt']:
continue
entry = Entry()
entry.count = int(raw_entry['count'], 16)
entry.size = int(raw_entry['size'], 16)
entry.type = int(raw_entry['type'])
entry.stackframe = int(raw_entry['bt'])
entries.append(entry)
elif process.version == 2:
raw_entries = list(zip(process.allocators[heap]['counts'],
process.allocators[heap]['sizes'],
process.allocators[heap]['types'],
process.allocators[heap]['nodes']))
for (raw_count, raw_size, raw_type, raw_stackframe) in raw_entries:
entry = Entry()
entry.count = raw_count
entry.size = raw_size
entry.type = raw_type
entry.stackframe = raw_stackframe
entries.append(entry)
# Resolve fields by looking into indexes
ResolveMemoryDumpFields(entries, process.stackframes, process.types)
return entries
def FilterProcesses(processes, filter_by_name, filter_by_labels):
remaining_processes = {}
for pid, process in six.iteritems(processes):
if filter_by_name and process.name != filter_by_name:
continue
if (filter_by_labels and
(not process.labels or filter_by_labels not in process.labels)):
continue
remaining_processes[pid] = process
return remaining_processes
def FindRelevantProcesses(start_trace, end_trace,
filter_by_name,
filter_by_labels,
match_by_labels):
# Retrieve the processes and the associated memory dump.
end_processes = FindMemoryDumps(end_trace)
end_processes = FilterProcesses(end_processes, filter_by_name,
filter_by_labels)
start_processes = None
if start_trace:
start_processes = FindMemoryDumps(start_trace)
start_processes = FilterProcesses(start_processes, filter_by_name,
filter_by_labels)
# Build a sequence of pair of processes to be compared.
processes = []
if not start_processes:
# Only keep end-processes.
for _, end_process in six.iteritems(end_processes):
processes.append((None, end_process))
elif match_by_labels:
# Processes are paired based on name/labels.
for _, end_process in six.iteritems(end_processes):
matching_start_process = None
for _, start_process in six.iteritems(start_processes):
if (start_process.name == end_process.name and
(start_process.name in ['Browser', 'GPU'] or
start_process.labels == end_process.labels)):
matching_start_process = start_process
if matching_start_process:
processes.append((matching_start_process, end_process))
else:
# Processes are paired based on their PID.
relevant_pids = set(end_processes.keys()) & set(start_processes.keys())
for pid in relevant_pids:
start_process = start_processes[pid]
end_process = end_processes[pid]
processes.append((start_process, end_process))
return processes
def BuildGraphDumps(processes, threshold, size_threshold):
"""
Build graph for a sequence of pair of processes.
If start_process is None, counts objects in end_trace.
Otherwise, counts objects present in end_trace, but not in start_process.
"""
graph_dumps = []
for (start_process, end_process) in processes:
pid = end_process.pid
name = end_process.name if end_process.name else ''
labels = end_process.labels if end_process.labels else ''
print('Process[%d] %s: %s' % (pid, name, labels))
for heap in end_process.allocators:
start_entries = GetEntries(heap, start_process)
end_entries = GetEntries(heap, end_process)
graph = GraphDump()
graph.pid = pid
graph.name = name
graph.labels = labels
graph.heap = heap
graph_dumps.append(graph)
# Do the math: diffing start and end memory dumps.
root = {}
root['count'] = 0
root['size'] = 0
root['children'] = {}
root['count_by_type'] = {}
for entry in start_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, - entry.count, - entry.size,
entry.type, root)
for entry in end_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, entry.count, entry.size,
entry.type, root)
CanonicalHeapEntries(root)
graph.root = root
# Find leaks
leaks = []
FindLeaks(root, [], leaks, threshold, size_threshold)
leaks.sort(reverse=True, key=lambda k: k['size'])
if leaks:
print(' %s: %d potential leaks found.' % (heap, len(leaks)))
graph.leaks = leaks
graph.leak_stackframes = len(leaks)
for leak in leaks:
graph.leak_objects += leak['count']
return graph_dumps
def WritePotentialLeaks(graph_dumps):
for graph in graph_dumps:
if graph.leaks:
filename = 'process_%d_%s-leaks.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_DIR, filename)
with open(output_filename, 'w') as output:
json.dump(graph.leaks, output)
def WriteGrahDumps(graph_dumps, threshold, size_threshold):
for graph in graph_dumps:
# Dump the remaining allocated objects tree.
filename = 'process_%d_%s-objects.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, filename)
if graph.root:
with open(output_filename, 'w') as output:
DumpTree(graph.root, '.', output, threshold, size_threshold)
graph.root = filename
def WriteIndex(graph_dumps):
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, 'index.json')
with open(output_filename, 'w') as output:
json.dump([
{'pid': graph.pid,
'heap': graph.heap,
'name': graph.name,
'labels': graph.labels,
'objects': graph.root,
'potential leaks': graph.leak_stackframes,
'objects leaked': graph.leak_objects,
}
for graph in graph_dumps], output)
def WriteHTML():
# Copy the HTML page.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'diff_heap_profiler.html')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'index.html')
shutil.copyfile(source, destination)
# Copy the D3 library file.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
os.path.pardir,
os.path.pardir,
'tracing',
'third_party',
'd3',
'd3.min.js')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'd3.min.js')
shutil.copyfile(source, destination)
def Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--flame-graph',
action='store_true',
help='Output a flame graph based on stackframe allocations')
parser.add_argument(
'--threshold',
type=int,
default=0,
help='Objects threshold for being a potential memory leak')
parser.add_argument(
'--size-threshold',
type=int,
default=0,
help='Size threshold for being a potential memory leak')
parser.add_argument(
'--filter-by-name',
type=str,
help='Only keep processes with name (i.e. Browser, Renderer, ...)')
parser.add_argument(
'--filter-by-labels',
type=str,
help='Only keep processes with matching labels')
parser.add_argument(
'--match-by-labels',
action='store_true',
help='Match processes between runs by labels')
parser.add_argument(
'trace',
nargs='+',
help='Trace files to be processed')
options = parser.parse_args()
if options.threshold == 0 and options.size_threshold == 0:
options.threshold = 1000
if len(options.trace) == 1:
end_trace = options.trace[0]
start_trace = None
else:
start_trace = options.trace[0]
end_trace = options.trace[1]
if not os.path.exists(_OUTPUT_DIR):
os.makedirs(_OUTPUT_DIR)
# Find relevant processes to be processed.
processes = FindRelevantProcesses(start_trace, end_trace,
options.filter_by_name,
options.filter_by_labels,
options.match_by_labels)
graph_dumps = BuildGraphDumps(processes, options.threshold,
options.size_threshold)
WritePotentialLeaks(graph_dumps)
if options.flame_graph:
if not os.path.exists(_OUTPUT_GRAPH_DIR):
os.makedirs(_OUTPUT_GRAPH_DIR)
WriteGrahDumps(graph_dumps, options.threshold, options.size_threshold)
WriteIndex(graph_dumps)
WriteHTML()
if __name__ == '__main__':
Main()
| [((17, 20, 17, 54), 'os.path.join', 'os.path.join', ({(17, 33, 17, 44): '_OUTPUT_DIR', (17, 46, 17, 53): '"""graph"""'}, {}), "(_OUTPUT_DIR, 'graph')", False, 'import os\n'), ((186, 15, 186, 47), 'six.itervalues', 'six.itervalues', ({(186, 30, 186, 46): "root['children']"}, {}), "(root['children'])", False, 'import six\n'), ((199, 15, 199, 47), 'six.itervalues', 'six.itervalues', ({(199, 30, 199, 46): "root['children']"}, {}), "(root['children'])", False, 'import six\n'), ((276, 22, 276, 46), 'six.iteritems', 'six.iteritems', ({(276, 36, 276, 45): 'processes'}, {}), '(processes)', False, 'import six\n'), ((413, 20, 413, 65), 'os.path.join', 'os.path.join', ({(413, 33, 413, 50): '_OUTPUT_GRAPH_DIR', (413, 52, 413, 64): '"""index.json"""'}, {}), "(_OUTPUT_GRAPH_DIR, 'index.json')", False, 'import os\n'), ((431, 16, 431, 61), 'os.path.join', 'os.path.join', ({(431, 29, 431, 46): '_OUTPUT_GRAPH_DIR', (431, 48, 431, 60): '"""index.html"""'}, {}), "(_OUTPUT_GRAPH_DIR, 'index.html')", False, 'import os\n'), ((432, 2, 432, 38), 'shutil.copyfile', 'shutil.copyfile', ({(432, 18, 432, 24): 'source', (432, 26, 432, 37): 'destination'}, {}), '(source, destination)', False, 'import shutil\n'), ((443, 16, 443, 60), 'os.path.join', 'os.path.join', ({(443, 29, 443, 46): '_OUTPUT_GRAPH_DIR', (443, 48, 443, 59): '"""d3.min.js"""'}, {}), "(_OUTPUT_GRAPH_DIR, 'd3.min.js')", False, 'import os\n'), ((444, 2, 444, 38), 'shutil.copyfile', 'shutil.copyfile', ({(444, 18, 444, 24): 'source', (444, 26, 444, 37): 'destination'}, {}), '(source, destination)', False, 'import shutil\n'), ((448, 11, 448, 36), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((56, 11, 56, 43), 'gzip.open', 'gzip.open', ({(56, 21, 56, 30): 'file_path', (56, 32, 56, 42): "(mode + 'b')"}, {}), "(file_path, mode + 'b')", False, 'import gzip\n'), ((194, 17, 194, 49), 'six.itervalues', 'six.itervalues', ({(194, 32, 194, 48): "root['children']"}, {}), "(root['children'])", False, 'import six\n'), ((306, 26, 306, 54), 'six.iteritems', 'six.iteritems', ({(306, 40, 306, 53): 'end_processes'}, {}), '(end_processes)', False, 'import six\n'), ((405, 22, 405, 63), 'os.path.join', 'os.path.join', ({(405, 35, 405, 52): '_OUTPUT_GRAPH_DIR', (405, 54, 405, 62): 'filename'}, {}), '(_OUTPUT_GRAPH_DIR, filename)', False, 'import os\n'), ((415, 4, 424, 42), 'json.dump', 'json.dump', ({(415, 14, 424, 33): "[{'pid': graph.pid, 'heap': graph.heap, 'name': graph.name, 'labels': graph\n .labels, 'objects': graph.root, 'potential leaks': graph.\n leak_stackframes, 'objects leaked': graph.leak_objects} for graph in\n graph_dumps]", (424, 35, 424, 41): 'output'}, {}), "([{'pid': graph.pid, 'heap': graph.heap, 'name': graph.name,\n 'labels': graph.labels, 'objects': graph.root, 'potential leaks': graph\n .leak_stackframes, 'objects leaked': graph.leak_objects} for graph in\n graph_dumps], output)", False, 'import json\n'), ((491, 9, 491, 36), 'os.path.exists', 'os.path.exists', ({(491, 24, 491, 35): '_OUTPUT_DIR'}, {}), '(_OUTPUT_DIR)', False, 'import os\n'), ((492, 4, 492, 28), 'os.makedirs', 'os.makedirs', ({(492, 16, 492, 27): '_OUTPUT_DIR'}, {}), '(_OUTPUT_DIR)', False, 'import os\n'), ((310, 26, 310, 54), 'six.iteritems', 'six.iteritems', ({(310, 40, 310, 53): 'end_processes'}, {}), '(end_processes)', False, 'import six\n'), ((396, 24, 396, 59), 'os.path.join', 'os.path.join', ({(396, 37, 396, 48): '_OUTPUT_DIR', (396, 50, 396, 58): 'filename'}, {}), '(_OUTPUT_DIR, filename)', False, 'import os\n'), ((429, 40, 429, 
65), 'os.path.abspath', 'os.path.abspath', ({(429, 56, 429, 64): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((435, 40, 435, 65), 'os.path.abspath', 'os.path.abspath', ({(435, 56, 435, 64): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((506, 11, 506, 44), 'os.path.exists', 'os.path.exists', ({(506, 26, 506, 43): '_OUTPUT_GRAPH_DIR'}, {}), '(_OUTPUT_GRAPH_DIR)', False, 'import os\n'), ((507, 6, 507, 36), 'os.makedirs', 'os.makedirs', ({(507, 18, 507, 35): '_OUTPUT_GRAPH_DIR'}, {}), '(_OUTPUT_GRAPH_DIR)', False, 'import os\n'), ((81, 26, 81, 67), 'six.iteritems', 'six.iteritems', ({(81, 40, 81, 66): "event['args']['typeNames']"}, {}), "(event['args']['typeNames'])", False, 'import six\n'), ((86, 27, 86, 70), 'six.iteritems', 'six.iteritems', ({(86, 41, 86, 69): "event['args']['stackFrames']"}, {}), "(event['args']['stackFrames'])", False, 'import six\n'), ((256, 23, 259, 56), 'six.moves.zip', 'zip', ({(256, 27, 256, 61): "process.allocators[heap]['counts']", (257, 22, 257, 55): "process.allocators[heap]['sizes']", (258, 22, 258, 55): "process.allocators[heap]['types']", (259, 22, 259, 55): "process.allocators[heap]['nodes']"}, {}), "(process.allocators[heap]['counts'], process.allocators[heap]['sizes'],\n process.allocators[heap]['types'], process.allocators[heap]['nodes'])", False, 'from six.moves import zip\n'), ((312, 30, 312, 60), 'six.iteritems', 'six.iteritems', ({(312, 44, 312, 59): 'start_processes'}, {}), '(start_processes)', False, 'import six\n'), ((398, 8, 398, 38), 'json.dump', 'json.dump', ({(398, 18, 398, 29): 'graph.leaks', (398, 31, 398, 37): 'output'}, {}), '(graph.leaks, output)', False, 'import json\n')] |
kingjr/mne-bids | mne_bids/commands/mne_bids_raw_to_bids.py | 3a4543076912cebbc89a5f0b9433cda1b9e288b8 | """Write raw files to BIDS format.
example usage: $ mne_bids raw_to_bids --subject_id sub01 --task rest
--raw data.edf --bids_root new_path
"""
# Authors: Teon Brooks <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import mne_bids
from mne_bids import write_raw_bids, BIDSPath
from mne_bids.read import _read_raw
def run():
"""Run the raw_to_bids command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__, usage="usage: %prog options args",
prog_prefix='mne_bids',
version=mne_bids.__version__)
parser.add_option('--subject_id', dest='subject_id',
help=('subject name in BIDS compatible format '
'(01, 02, etc.)'))
parser.add_option('--task', dest='task',
help='name of the task the data is based on')
parser.add_option('--raw', dest='raw_fname',
help='path to the raw MEG file')
parser.add_option('--bids_root', dest='bids_root',
help='The path of the BIDS compatible folder.')
parser.add_option('--session_id', dest='session_id',
help='session name in BIDS compatible format')
parser.add_option('--run', dest='run',
help='run number for this dataset')
parser.add_option('--acq', dest='acq',
help='acquisition parameter for this dataset')
parser.add_option('--events_data', dest='events_data',
help='events file (events.tsv)')
parser.add_option('--event_id', dest='event_id',
help='event id dict', metavar='eid')
parser.add_option('--hpi', dest='hpi',
help='path to the MEG marker points')
parser.add_option('--electrode', dest='electrode',
help='path to head-native digitizer points')
parser.add_option('--hsp', dest='hsp',
help='path to headshape points')
parser.add_option('--config', dest='config',
help='path to the configuration file')
parser.add_option('--overwrite', dest='overwrite',
help="whether to overwrite existing data (BOOLEAN)")
parser.add_option('--line_freq', dest='line_freq',
help="The frequency of the line noise in Hz "
"(e.g. 50 or 60). If unknown, pass None")
opt, args = parser.parse_args()
if len(args) > 0:
parser.print_help()
parser.error('Do not specify arguments without flags. Found: "{}".\n'
.format(args))
if not all([opt.subject_id, opt.task, opt.raw_fname, opt.bids_root]):
parser.print_help()
        parser.error('Arguments missing. You need to specify at least the '
'following: --subject_id, --task, --raw, --bids_root.')
bids_path = BIDSPath(
subject=opt.subject_id, session=opt.session_id, run=opt.run,
acquisition=opt.acq, task=opt.task, root=opt.bids_root)
allow_maxshield = False
if opt.raw_fname.endswith('.fif'):
allow_maxshield = True
raw = _read_raw(opt.raw_fname, hpi=opt.hpi, electrode=opt.electrode,
hsp=opt.hsp, config=opt.config,
allow_maxshield=allow_maxshield)
if opt.line_freq is not None:
line_freq = None if opt.line_freq == "None" else opt.line_freq
raw.info['line_freq'] = line_freq
write_raw_bids(raw, bids_path, event_id=opt.event_id,
events_data=opt.events_data, overwrite=opt.overwrite,
verbose=True)
if __name__ == '__main__':
run()
| [((20, 13, 22, 56), 'mne.commands.utils.get_optparser', 'get_optparser', (), '', False, 'from mne.commands.utils import get_optparser\n'), ((69, 16, 71, 63), 'mne_bids.BIDSPath', 'BIDSPath', (), '', False, 'from mne_bids import write_raw_bids, BIDSPath\n'), ((77, 10, 79, 52), 'mne_bids.read._read_raw', '_read_raw', (), '', False, 'from mne_bids.read import _read_raw\n'), ((83, 4, 85, 32), 'mne_bids.write_raw_bids', 'write_raw_bids', (), '', False, 'from mne_bids import write_raw_bids, BIDSPath\n')] |
NastiaK/NewRepository | lab1oop.py | d1907fc2e159dc1831071d7c79e20bbfb47fb822 | class Calculations:
def __init__(self, first, second):
self.first = first
self.second = second
def add(self):
print(self.first + self.second)
def subtract(self):
print(self.first - self.second)
def multiply(self):
print(self.first * self.second)
def divide(self):
        if self.second == 0:
print("Can't divide by zero")
else:
print(self.first / self.second)
def main():
print("Calculator has started")
while True:
a = float(input("Enter first number "))
b = float(input("Enter second number "))
chooseop = 1
calc=Calculations(a, b)
        while (chooseop == 1) or (chooseop == 2) or (chooseop == 3) or (chooseop == 4):
chooseop = int(input("Enter 1 for addition, 2 for subtraction, 3 for multiplication and 4 for division "))
print(chooseop)
if chooseop == 1:
calc.add()
break
elif chooseop == 2:
calc.subtract()
break
elif chooseop == 3:
calc.multiply()
break
elif chooseop == 4:
calc.divide()
break
            elif (chooseop != 1) and (chooseop != 2) and (chooseop != 3) and (chooseop != 4):
print("Invalid operation number")
if __name__ == "__main__":
main()
| [] |
Jeans212/codility-dev-training | Arrays/cyclic_rotation.py | 9c5118c6433ea210d1485a6127712a92496e2bc2 | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
'''
Rotate an array A to the right by a given number of steps K.
Convert the array to a deque
Apply the rotate() method to rotate the deque to the right by K steps
Convert the deque to array
'''
from collections import deque
def solution(A, K):
# write your code in Python 3.6
deq_A = deque(A)
deq_A.rotate(K)
return list(deq_A)
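
# Hedged usage sketch (added for illustration, not part of the original
# Codility submission): deque.rotate(K) moves the last K elements to the
# front, which is exactly a right rotation by K steps. The sample values
# below are assumed from the classic Codility example.
if __name__ == '__main__':
    assert solution([3, 8, 9, 7, 6], 3) == [9, 7, 6, 3, 8]
    # rotate() wraps around the deque length, so K > len(A) needs no special case
    assert solution([1, 2, 3], 4) == [3, 1, 2]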
| [((17, 12, 17, 20), 'collections.deque', 'deque', ({(17, 18, 17, 19): 'A'}, {}), '(A)', False, 'from collections import deque\n')] |
hatzel/markdown-spoilers | tests/test_apis.py | 1964f298f0e8b99f1202d36ccc7d8cf7d613ad26 | # -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
Python-Markdown Regression Tests
================================
Tests of the various APIs with the python markdown lib.
"""
from __future__ import unicode_literals
import unittest
import sys
import os
import markdown
import warnings
from markdown.__main__ import parse_options
from logging import DEBUG, WARNING, CRITICAL
import yaml
import tempfile
from io import BytesIO
from xml.etree.ElementTree import ProcessingInstruction
PY3 = sys.version_info[0] == 3
if not PY3:
def bytes(string, encoding):
return string.encode(encoding)
class TestMarkdownBasics(unittest.TestCase):
""" Tests basics of the Markdown class. """
def setUp(self):
""" Create instance of Markdown. """
self.md = markdown.Markdown()
def testBlankInput(self):
""" Test blank input. """
self.assertEqual(self.md.convert(''), '')
def testWhitespaceOnly(self):
""" Test input of only whitespace. """
self.assertEqual(self.md.convert(' '), '')
def testSimpleInput(self):
""" Test simple input. """
self.assertEqual(self.md.convert('foo'), '<p>foo</p>')
def testInstanceExtension(self):
""" Test Extension loading with a class instance. """
from markdown.extensions.footnotes import FootnoteExtension
markdown.Markdown(extensions=[FootnoteExtension()])
def testEntryPointExtension(self):
""" Test Extension loading with an entry point. """
markdown.Markdown(extensions=['footnotes'])
def testDotNotationExtension(self):
""" Test Extension loading with Name (`path.to.module`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes'])
def testDotNotationExtensionWithClass(self):
""" Test Extension loading with class name (`path.to.module:Class`). """
markdown.Markdown(extensions=['markdown.extensions.footnotes:FootnoteExtension'])
class TestConvertFile(unittest.TestCase):
""" Tests of ConvertFile. """
def setUp(self):
self.saved = sys.stdin, sys.stdout
sys.stdin = BytesIO(bytes('foo', encoding='utf-8'))
sys.stdout = BytesIO()
def tearDown(self):
sys.stdin, sys.stdout = self.saved
def getTempFiles(self, src):
""" Return the file names for two temp files. """
infd, infile = tempfile.mkstemp(suffix='.txt')
with os.fdopen(infd, 'w') as fp:
fp.write(src)
outfd, outfile = tempfile.mkstemp(suffix='.html')
return infile, outfile, outfd
def testFileNames(self):
infile, outfile, outfd = self.getTempFiles('foo')
markdown.markdownFromFile(input=infile, output=outfile)
with os.fdopen(outfd, 'r') as fp:
output = fp.read()
self.assertEqual(output, '<p>foo</p>')
def testFileObjects(self):
infile = BytesIO(bytes('foo', encoding='utf-8'))
outfile = BytesIO()
markdown.markdownFromFile(input=infile, output=outfile)
outfile.seek(0)
self.assertEqual(outfile.read().decode('utf-8'), '<p>foo</p>')
def testStdinStdout(self):
markdown.markdownFromFile()
sys.stdout.seek(0)
self.assertEqual(sys.stdout.read().decode('utf-8'), '<p>foo</p>')
class TestBlockParser(unittest.TestCase):
""" Tests of the BlockParser class. """
def setUp(self):
""" Create instance of BlockParser. """
self.parser = markdown.Markdown().parser
def testParseChunk(self):
""" Test BlockParser.parseChunk. """
root = markdown.util.etree.Element("div")
text = 'foo'
self.parser.parseChunk(root, text)
self.assertEqual(
markdown.serializers.to_xhtml_string(root),
"<div><p>foo</p></div>"
)
def testParseDocument(self):
""" Test BlockParser.parseDocument. """
lines = ['#foo', '', 'bar', '', ' baz']
tree = self.parser.parseDocument(lines)
self.assertIsInstance(tree, markdown.util.etree.ElementTree)
self.assertIs(markdown.util.etree.iselement(tree.getroot()), True)
self.assertEqual(
markdown.serializers.to_xhtml_string(tree.getroot()),
"<div><h1>foo</h1><p>bar</p><pre><code>baz\n</code></pre></div>"
)
class TestBlockParserState(unittest.TestCase):
""" Tests of the State class for BlockParser. """
def setUp(self):
self.state = markdown.blockparser.State()
def testBlankState(self):
""" Test State when empty. """
self.assertEqual(self.state, [])
def testSetSate(self):
""" Test State.set(). """
self.state.set('a_state')
self.assertEqual(self.state, ['a_state'])
self.state.set('state2')
self.assertEqual(self.state, ['a_state', 'state2'])
def testIsSate(self):
""" Test State.isstate(). """
self.assertEqual(self.state.isstate('anything'), False)
self.state.set('a_state')
self.assertEqual(self.state.isstate('a_state'), True)
self.state.set('state2')
self.assertEqual(self.state.isstate('state2'), True)
self.assertEqual(self.state.isstate('a_state'), False)
self.assertEqual(self.state.isstate('missing'), False)
def testReset(self):
""" Test State.reset(). """
self.state.set('a_state')
self.state.reset()
self.assertEqual(self.state, [])
self.state.set('state1')
self.state.set('state2')
self.state.reset()
self.assertEqual(self.state, ['state1'])
class TestHtmlStash(unittest.TestCase):
""" Test Markdown's HtmlStash. """
def setUp(self):
self.stash = markdown.util.HtmlStash()
self.placeholder = self.stash.store('foo')
def testSimpleStore(self):
""" Test HtmlStash.store. """
self.assertEqual(self.placeholder, self.stash.get_placeholder(0))
self.assertEqual(self.stash.html_counter, 1)
self.assertEqual(self.stash.rawHtmlBlocks, ['foo'])
def testStoreMore(self):
""" Test HtmlStash.store with additional blocks. """
placeholder = self.stash.store('bar')
self.assertEqual(placeholder, self.stash.get_placeholder(1))
self.assertEqual(self.stash.html_counter, 2)
self.assertEqual(
self.stash.rawHtmlBlocks,
['foo', 'bar']
)
def testReset(self):
""" Test HtmlStash.reset. """
self.stash.reset()
self.assertEqual(self.stash.html_counter, 0)
self.assertEqual(self.stash.rawHtmlBlocks, [])
class Item(object):
""" A dummy Registry item object for testing. """
def __init__(self, data):
self.data = data
def __repr__(self):
return repr(self.data)
def __eq__(self, other):
return self.data == other
class RegistryTests(unittest.TestCase):
""" Test the processor registry. """
def testCreateRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
self.assertEqual(len(r), 1)
self.assertIsInstance(r, markdown.util.Registry)
def testRegisterWithoutPriority(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r.register(Item('a'))
def testSortRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 21)
r.register(Item('c'), 'c', 20.5)
self.assertEqual(len(r), 3)
self.assertEqual(list(r), ['b', 'c', 'a'])
def testIsSorted(self):
r = markdown.util.Registry()
self.assertIs(r._is_sorted, False)
r.register(Item('a'), 'a', 20)
list(r)
self.assertIs(r._is_sorted, True)
r.register(Item('b'), 'b', 21)
self.assertIs(r._is_sorted, False)
r['a']
self.assertIs(r._is_sorted, True)
r._is_sorted = False
r.get_index_for_name('a')
self.assertIs(r._is_sorted, True)
r._is_sorted = False
repr(r)
self.assertIs(r._is_sorted, True)
def testDeregister(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
self.assertEqual(len(r), 3)
r.deregister('b')
self.assertEqual(len(r), 2)
r.deregister('c', strict=False)
self.assertEqual(len(r), 1)
        # deregister non-existent item with strict=False
r.deregister('d', strict=False)
self.assertEqual(len(r), 1)
with self.assertRaises(ValueError):
            # deregister non-existent item with strict=True
r.deregister('e')
self.assertEqual(list(r), ['a'])
def testRegistryContains(self):
r = markdown.util.Registry()
item = Item('a')
r.register(item, 'a', 20)
self.assertIs('a' in r, True)
self.assertIn(item, r)
self.assertNotIn('b', r)
def testRegistryIter(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(list(r), ['b', 'a'])
def testRegistryGetItemByIndex(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r[0], 'b')
self.assertEqual(r[1], 'a')
with self.assertRaises(IndexError):
r[3]
def testRegistryGetItemByItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r['a'], 'a')
self.assertEqual(r['b'], 'b')
with self.assertRaises(KeyError):
r['c']
def testRegistrySetItem(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r[0] = 'a'
# TODO: restore this when deprecated __setitem__ is removed.
# with self.assertRaises(TypeError):
# r['a'] = 'a'
# TODO: remove this when deprecated __setitem__ is removed.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r['a'] = Item('a')
self.assertEqual(list(r), ['a'])
r['b'] = Item('b')
self.assertEqual(list(r), ['a', 'b'])
r['a'] = Item('a1')
self.assertEqual(list(r), ['a1', 'b'])
# Check the warnings
self.assertEqual(len(w), 3)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
def testRegistryDelItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
with self.assertRaises(TypeError):
del r[0]
# TODO: restore this when deprecated __del__ is removed.
# with self.assertRaises(TypeError):
# del r['a']
# TODO: remove this when deprecated __del__ is removed.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r.register(Item('b'), 'b', 15)
r.register(Item('c'), 'c', 10)
del r['b']
self.assertEqual(list(r), ['a', 'c'])
del r['a']
self.assertEqual(list(r), ['c'])
with self.assertRaises(TypeError):
del r['badname']
del r['c']
self.assertEqual(list(r), [])
# Check the warnings
self.assertEqual(len(w), 3)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
def testRegistrySlice(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
slc = r[1:]
self.assertEqual(len(slc), 2)
self.assertIsInstance(slc, markdown.util.Registry)
self.assertEqual(list(slc), ['b', 'a'])
def testGetIndexForName(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r.get_index_for_name('a'), 1)
self.assertEqual(r.get_index_for_name('b'), 0)
with self.assertRaises(ValueError):
r.get_index_for_name('c')
def testRegisterDupplicate(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b1'), 'b', 10)
self.assertEqual(list(r), ['a', 'b1'])
self.assertEqual(len(r), 2)
r.register(Item('b2'), 'b', 30)
self.assertEqual(len(r), 2)
self.assertEqual(list(r), ['b2', 'a'])
def testRegistryDeprecatedAdd(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
r = markdown.util.Registry()
# Add first item
r.add('c', Item('c'), '_begin')
self.assertEqual(list(r), ['c'])
# Added to beginning
r.add('b', Item('b'), '_begin')
self.assertEqual(list(r), ['b', 'c'])
# Add before first item
r.add('a', Item('a'), '<b')
self.assertEqual(list(r), ['a', 'b', 'c'])
# Add before non-first item
r.add('a1', Item('a1'), '<b')
self.assertEqual(list(r), ['a', 'a1', 'b', 'c'])
# Add after non-last item
r.add('b1', Item('b1'), '>b')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c'])
# Add after last item
r.add('d', Item('d'), '>c')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd'])
# Add to end
r.add('e', Item('e'), '_end')
self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd', 'e'])
with self.assertRaises(ValueError):
r.add('f', Item('f'), 'badlocation')
# Check the warnings
self.assertEqual(len(w), 7)
self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
class TestErrors(unittest.TestCase):
""" Test Error Reporting. """
def setUp(self):
# Set warnings to be raised as errors
warnings.simplefilter('error')
def tearDown(self):
# Reset warning behavior back to default
warnings.simplefilter('default')
def testNonUnicodeSource(self):
""" Test falure on non-unicode source text. """
if not PY3:
source = "foo".encode('utf-16')
self.assertRaises(UnicodeDecodeError, markdown.markdown, source)
def testBadOutputFormat(self):
""" Test failure on bad output_format. """
self.assertRaises(KeyError, markdown.Markdown, output_format='invalid')
def testLoadExtensionFailure(self):
""" Test failure of an extension to load. """
self.assertRaises(
ImportError,
markdown.Markdown, extensions=['non_existant_ext']
)
def testLoadBadExtension(self):
""" Test loading of an Extension with no makeExtension function. """
self.assertRaises(AttributeError, markdown.Markdown, extensions=['markdown.util'])
def testNonExtension(self):
""" Test loading a non Extension object as an extension. """
self.assertRaises(TypeError, markdown.Markdown, extensions=[object])
def testDotNotationExtensionWithBadClass(self):
""" Test Extension loading with non-existant class name (`path.to.module:Class`). """
self.assertRaises(
AttributeError,
markdown.Markdown,
extensions=['markdown.extensions.footnotes:MissingExtension']
)
def testBaseExtention(self):
""" Test that the base Extension class will raise NotImplemented. """
self.assertRaises(
NotImplementedError,
markdown.Markdown, extensions=[markdown.extensions.Extension()]
)
class testETreeComments(unittest.TestCase):
"""
Test that ElementTree Comments work.
These tests should only be a concern when using cElementTree with third
party serializers (including markdown's (x)html serializer). While markdown
doesn't use ElementTree.Comment itself, we should certainly support any
third party extensions which may. Therefore, these tests are included to
ensure such support is maintained.
"""
def setUp(self):
# Create comment node
self.comment = markdown.util.etree.Comment('foo')
if hasattr(markdown.util.etree, 'test_comment'):
self.test_comment = markdown.util.etree.test_comment
else:
self.test_comment = markdown.util.etree.Comment
def testCommentIsComment(self):
""" Test that an ElementTree Comment passes the `is Comment` test. """
self.assertIs(self.comment.tag, markdown.util.etree.test_comment)
def testCommentIsBlockLevel(self):
""" Test that an ElementTree Comment is recognized as BlockLevel. """
md = markdown.Markdown()
self.assertIs(md.is_block_level(self.comment.tag), False)
def testCommentSerialization(self):
""" Test that an ElementTree Comment serializes properly. """
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->'
)
def testCommentPrettify(self):
""" Test that an ElementTree Comment is prettified properly. """
pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
pretty.run(self.comment)
self.assertEqual(
markdown.serializers.to_html_string(self.comment),
'<!--foo-->\n'
)
class testElementTailTests(unittest.TestCase):
""" Element Tail Tests """
def setUp(self):
self.pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
def testBrTailNoNewline(self):
""" Test that last <br> in tree has a new line tail """
root = markdown.util.etree.Element('root')
br = markdown.util.etree.SubElement(root, 'br')
self.assertEqual(br.tail, None)
self.pretty.run(root)
self.assertEqual(br.tail, "\n")
class testSerializers(unittest.TestCase):
""" Test the html and xhtml serializers. """
def testHtml(self):
""" Test HTML serialization. """
el = markdown.util.etree.Element('div')
el.set('id', 'foo<&">')
p = markdown.util.etree.SubElement(el, 'p')
p.text = 'foo <&escaped>'
p.set('hidden', 'hidden')
markdown.util.etree.SubElement(el, 'hr')
non_element = markdown.util.etree.SubElement(el, None)
non_element.text = 'non-element text'
script = markdown.util.etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_html_string(el),
'<div id="foo<&">">'
'<p hidden>foo <&escaped></p>'
'<hr>'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testXhtml(self):
"""" Test XHTML serialization. """
el = markdown.util.etree.Element('div')
el.set('id', 'foo<&">')
p = markdown.util.etree.SubElement(el, 'p')
p.text = 'foo<&escaped>'
p.set('hidden', 'hidden')
markdown.util.etree.SubElement(el, 'hr')
non_element = markdown.util.etree.SubElement(el, None)
non_element.text = 'non-element text'
script = markdown.util.etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div id="foo<&">">'
'<p hidden="hidden">foo<&escaped></p>'
'<hr />'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testMixedCaseTags(self):
"""" Test preservation of tag case. """
el = markdown.util.etree.Element('MixedCase')
el.text = 'not valid '
em = markdown.util.etree.SubElement(el, 'EMPHASIS')
em.text = 'html'
markdown.util.etree.SubElement(el, 'HR')
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<MixedCase>not valid <EMPHASIS>html</EMPHASIS><HR /></MixedCase>'
)
def testProsessingInstruction(self):
""" Test serialization of ProcessignInstruction. """
pi = ProcessingInstruction('foo', text='<&"test\nescaping">')
self.assertIs(pi.tag, ProcessingInstruction)
self.assertEqual(
markdown.serializers.to_xhtml_string(pi),
'<?foo <&"test\nescaping">?>'
)
def testQNameTag(self):
""" Test serialization of QName tag. """
div = markdown.util.etree.Element('div')
qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML', 'math')
math = markdown.util.etree.SubElement(div, qname)
math.set('display', 'block')
sem = markdown.util.etree.SubElement(math, 'semantics')
msup = markdown.util.etree.SubElement(sem, 'msup')
mi = markdown.util.etree.SubElement(msup, 'mi')
mi.text = 'x'
mn = markdown.util.etree.SubElement(msup, 'mn')
mn.text = '2'
ann = markdown.util.etree.SubElement(sem, 'annotations')
ann.text = 'x^2'
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div>'
'<math display="block" xmlns="http://www.w3.org/1998/Math/MathML">'
'<semantics>'
'<msup>'
'<mi>x</mi>'
'<mn>2</mn>'
'</msup>'
'<annotations>x^2</annotations>'
'</semantics>'
'</math>'
'</div>'
)
def testQNameAttribute(self):
""" Test serialization of QName attribute. """
div = markdown.util.etree.Element('div')
div.set(markdown.util.etree.QName('foo'), markdown.util.etree.QName('bar'))
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div foo="bar"></div>'
)
def testBadQNameTag(self):
""" Test serialization of QName with no tag. """
qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML')
el = markdown.util.etree.Element(qname)
self.assertRaises(ValueError, markdown.serializers.to_xhtml_string, el)
def testQNameEscaping(self):
""" Test QName escaping. """
qname = markdown.util.etree.QName('<&"test\nescaping">', 'div')
el = markdown.util.etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def testQNamePreEscaping(self):
""" Test QName that is already partially escaped. """
qname = markdown.util.etree.QName('<&"test escaping">', 'div')
el = markdown.util.etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def buildExtension(self):
""" Build an extension which registers fakeSerializer. """
def fakeSerializer(elem):
# Ignore input and return hardcoded output
return '<div><p>foo</p></div>'
class registerFakeSerializer(markdown.extensions.Extension):
def extendMarkdown(self, md):
md.output_formats['fake'] = fakeSerializer
return registerFakeSerializer()
def testRegisterSerializer(self):
self.assertEqual(
markdown.markdown(
'baz', extensions=[self.buildExtension()], output_format='fake'
),
'<p>foo</p>'
)
def testXHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='xhtml'),
'<p>foo<br />\nbar</p>'
)
def testHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='html'),
'<p>foo<br>\nbar</p>'
)
class testAtomicString(unittest.TestCase):
""" Test that AtomicStrings are honored (not parsed). """
def setUp(self):
md = markdown.Markdown()
self.inlineprocessor = md.treeprocessors['inline']
def testString(self):
""" Test that a regular string is parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = 'some *text*'
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some <em>text</em></p></div>'
)
def testSimpleAtomicString(self):
""" Test that a simple AtomicString is not parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('some *text*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>some *text*</p></div>'
)
def testNestedAtomicString(self):
""" Test that a nested AtomicString is not parsed. """
tree = markdown.util.etree.Element('div')
p = markdown.util.etree.SubElement(tree, 'p')
p.text = markdown.util.AtomicString('*some* ')
span1 = markdown.util.etree.SubElement(p, 'span')
span1.text = markdown.util.AtomicString('*more* ')
span2 = markdown.util.etree.SubElement(span1, 'span')
span2.text = markdown.util.AtomicString('*text* ')
span3 = markdown.util.etree.SubElement(span2, 'span')
span3.text = markdown.util.AtomicString('*here*')
span3.tail = markdown.util.AtomicString(' *to*')
span2.tail = markdown.util.AtomicString(' *test*')
span1.tail = markdown.util.AtomicString(' *with*')
new = self.inlineprocessor.run(tree)
self.assertEqual(
markdown.serializers.to_html_string(new),
'<div><p>*some* <span>*more* <span>*text* <span>*here*</span> '
'*to*</span> *test*</span> *with*</p></div>'
)
class TestConfigParsing(unittest.TestCase):
def assertParses(self, value, result):
self.assertIs(markdown.util.parseBoolValue(value, False), result)
def testBooleansParsing(self):
self.assertParses(True, True)
self.assertParses('novalue', None)
self.assertParses('yES', True)
self.assertParses('FALSE', False)
self.assertParses(0., False)
self.assertParses('none', False)
def testPreserveNone(self):
self.assertIsNone(markdown.util.parseBoolValue('None', preserve_none=True))
self.assertIsNone(markdown.util.parseBoolValue(None, preserve_none=True))
def testInvalidBooleansParsing(self):
self.assertRaises(ValueError, markdown.util.parseBoolValue, 'novalue')
class TestCliOptionParsing(unittest.TestCase):
""" Test parsing of Command Line Interface Options. """
def setUp(self):
self.default_options = {
'input': None,
'output': None,
'encoding': None,
'output_format': 'xhtml',
'lazy_ol': True,
'extensions': [],
'extension_configs': {},
}
self.tempfile = ''
def tearDown(self):
if os.path.isfile(self.tempfile):
os.remove(self.tempfile)
def testNoOptions(self):
options, logging_level = parse_options([])
self.assertEqual(options, self.default_options)
self.assertEqual(logging_level, CRITICAL)
def testQuietOption(self):
options, logging_level = parse_options(['-q'])
self.assertGreater(logging_level, CRITICAL)
def testVerboseOption(self):
options, logging_level = parse_options(['-v'])
self.assertEqual(logging_level, WARNING)
def testNoisyOption(self):
options, logging_level = parse_options(['--noisy'])
self.assertEqual(logging_level, DEBUG)
def testInputFileOption(self):
options, logging_level = parse_options(['foo.txt'])
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testOutputFileOption(self):
options, logging_level = parse_options(['-f', 'foo.html'])
self.default_options['output'] = 'foo.html'
self.assertEqual(options, self.default_options)
def testInputAndOutputFileOptions(self):
options, logging_level = parse_options(['-f', 'foo.html', 'foo.txt'])
self.default_options['output'] = 'foo.html'
self.default_options['input'] = 'foo.txt'
self.assertEqual(options, self.default_options)
def testEncodingOption(self):
options, logging_level = parse_options(['-e', 'utf-8'])
self.default_options['encoding'] = 'utf-8'
self.assertEqual(options, self.default_options)
def testOutputFormatOption(self):
options, logging_level = parse_options(['-o', 'html'])
self.default_options['output_format'] = 'html'
self.assertEqual(options, self.default_options)
def testNoLazyOlOption(self):
options, logging_level = parse_options(['-n'])
self.default_options['lazy_ol'] = False
self.assertEqual(options, self.default_options)
def testExtensionOption(self):
options, logging_level = parse_options(['-x', 'markdown.extensions.footnotes'])
self.default_options['extensions'] = ['markdown.extensions.footnotes']
self.assertEqual(options, self.default_options)
def testMultipleExtensionOptions(self):
options, logging_level = parse_options([
'-x', 'markdown.extensions.footnotes',
'-x', 'markdown.extensions.smarty'
])
self.default_options['extensions'] = [
'markdown.extensions.footnotes',
'markdown.extensions.smarty'
]
self.assertEqual(options, self.default_options)
def create_config_file(self, config):
""" Helper to create temp config files. """
if not isinstance(config, markdown.util.string_type):
# convert to string
config = yaml.dump(config)
fd, self.tempfile = tempfile.mkstemp('.yml')
with os.fdopen(fd, 'w') as fp:
fp.write(config)
def testExtensionConfigOption(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
    def testBoolExtensionConfigOption(self):
config = {
'markdown.extensions.toc': {
'title': 'Some Title',
'anchorlink': True,
'permalink': True
}
}
self.create_config_file(config)
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionAsJSON(self):
config = {
'markdown.extensions.wikilinks': {
'base_url': 'http://example.com/',
'end_url': '.html',
'html_class': 'test',
},
'markdown.extensions.footnotes:FootnotesExtension': {
'PLACE_MARKER': '~~~footnotes~~~'
}
}
import json
self.create_config_file(json.dumps(config))
options, logging_level = parse_options(['-c', self.tempfile])
self.default_options['extension_configs'] = config
self.assertEqual(options, self.default_options)
def testExtensionConfigOptionMissingFile(self):
self.assertRaises(IOError, parse_options, ['-c', 'missing_file.yaml'])
def testExtensionConfigOptionBadFormat(self):
config = """
[footnotes]
PLACE_MARKER= ~~~footnotes~~~
"""
self.create_config_file(config)
self.assertRaises(yaml.YAMLError, parse_options, ['-c', self.tempfile])
class TestEscapeAppend(unittest.TestCase):
""" Tests escape character append. """
def testAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.ESCAPED_CHARS.append('|')
self.assertEqual('|' in md.ESCAPED_CHARS, True)
md2 = markdown.Markdown()
self.assertEqual('|' not in md2.ESCAPED_CHARS, True)
class TestBlockAppend(unittest.TestCase):
""" Tests block kHTML append. """
def testBlockAppend(self):
""" Test that appended escapes are only in the current instance. """
md = markdown.Markdown()
md.block_level_elements.append('test')
self.assertEqual('test' in md.block_level_elements, True)
md2 = markdown.Markdown()
self.assertEqual('test' not in md2.block_level_elements, True)
class TestAncestorExclusion(unittest.TestCase):
""" Tests exclusion of tags in ancestor list. """
class AncestorExample(markdown.inlinepatterns.SimpleTagInlineProcessor):
""" Ancestor Test. """
ANCESTOR_EXCLUDES = ('a',)
def handleMatch(self, m, data):
""" Handle match. """
el = markdown.util.etree.Element(self.tag)
el.text = m.group(2)
return el, m.start(0), m.end(0)
class AncestorExtension(markdown.Extension):
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {}
def extendMarkdown(self, md):
"""Modify inline patterns."""
pattern = r'(\+)([^\+]+)\1'
md.inlinePatterns.register(TestAncestorExclusion.AncestorExample(pattern, 'strong'), 'ancestor-test', 0)
def setUp(self):
"""Setup markdown object."""
self.md = markdown.Markdown(extensions=[TestAncestorExclusion.AncestorExtension()])
def test_ancestors(self):
""" Test that an extension can exclude parent tags. """
test = """
Some +test+ and a [+link+](http://test.com)
"""
result = """<p>Some <strong>test</strong> and a <a href="http://test.com">+link+</a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
def test_ancestors_tail(self):
""" Test that an extension can exclude parent tags when dealing with a tail. """
test = """
[***+em+*+strong+**](http://test.com)
"""
result = """<p><a href="http://test.com"><strong><em>+em+</em>+strong+</strong></a></p>"""
self.md.reset()
self.assertEqual(self.md.convert(test), result)
class TestGeneralDeprecations(unittest.TestCase):
"""Test general deprecations."""
def test_version_deprecation(self):
"""Test that version is deprecated."""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
version = markdown.version
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertEqual(version, markdown.__version__)
def test_version_info_deprecation(self):
"""Test that version info is deprecated."""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
version_info = markdown.version_info
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertEqual(version_info, markdown.__version_info__)
def test_deprecation_wrapper_dir(self):
"""Tests the `__dir__` attribute of the class as it replaces the module's."""
dir_attr = dir(markdown)
self.assertNotIn('version', dir_attr)
self.assertIn('__version__', dir_attr)
self.assertNotIn('version_info', dir_attr)
self.assertIn('__version_info__', dir_attr)
| [((55, 18, 55, 37), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((76, 8, 76, 51), 'markdown.Markdown', 'markdown.Markdown', (), '', False, 'import markdown\n'), ((80, 8, 80, 71), 'markdown.Markdown', 'markdown.Markdown', (), '', False, 'import markdown\n'), ((84, 8, 84, 89), 'markdown.Markdown', 'markdown.Markdown', (), '', False, 'import markdown\n'), ((93, 21, 93, 30), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO\n'), ((100, 23, 100, 54), 'tempfile.mkstemp', 'tempfile.mkstemp', (), '', False, 'import tempfile\n'), ((103, 25, 103, 57), 'tempfile.mkstemp', 'tempfile.mkstemp', (), '', False, 'import tempfile\n'), ((108, 8, 108, 63), 'markdown.markdownFromFile', 'markdown.markdownFromFile', (), '', False, 'import markdown\n'), ((115, 18, 115, 27), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO\n'), ((116, 8, 116, 63), 'markdown.markdownFromFile', 'markdown.markdownFromFile', (), '', False, 'import markdown\n'), ((121, 8, 121, 35), 'markdown.markdownFromFile', 'markdown.markdownFromFile', ({}, {}), '()', False, 'import markdown\n'), ((122, 8, 122, 26), 'sys.stdout.seek', 'sys.stdout.seek', ({(122, 24, 122, 25): '(0)'}, {}), '(0)', False, 'import sys\n'), ((135, 15, 135, 49), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(135, 43, 135, 48): '"""div"""'}, {}), "('div')", False, 'import markdown\n'), ((159, 21, 159, 49), 'markdown.blockparser.State', 'markdown.blockparser.State', ({}, {}), '()', False, 'import markdown\n'), ((197, 21, 197, 46), 'markdown.util.HtmlStash', 'markdown.util.HtmlStash', ({}, {}), '()', False, 'import markdown\n'), ((239, 12, 239, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((245, 12, 245, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((250, 12, 250, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((258, 12, 258, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((275, 12, 275, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((293, 12, 293, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((301, 12, 301, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((307, 12, 307, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((316, 12, 316, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((325, 12, 325, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((347, 12, 347, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((374, 12, 374, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((384, 12, 384, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((393, 12, 393, 36), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((441, 8, 441, 38), 'warnings.simplefilter', 'warnings.simplefilter', ({(441, 30, 441, 37): '"""error"""'}, {}), "('error')", False, 'import warnings\n'), ((445, 8, 445, 40), 'warnings.simplefilter', 'warnings.simplefilter', 
({(445, 30, 445, 39): '"""default"""'}, {}), "('default')", False, 'import warnings\n'), ((501, 23, 501, 57), 'markdown.util.etree.Comment', 'markdown.util.etree.Comment', ({(501, 51, 501, 56): '"""foo"""'}, {}), "('foo')", False, 'import markdown\n'), ((513, 13, 513, 32), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((540, 15, 540, 50), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(540, 43, 540, 49): '"""root"""'}, {}), "('root')", False, 'import markdown\n'), ((541, 13, 541, 55), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(541, 44, 541, 48): 'root', (541, 50, 541, 54): '"""br"""'}, {}), "(root, 'br')", False, 'import markdown\n'), ((552, 13, 552, 47), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(552, 41, 552, 46): '"""div"""'}, {}), "('div')", False, 'import markdown\n'), ((554, 12, 554, 51), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(554, 43, 554, 45): 'el', (554, 47, 554, 50): '"""p"""'}, {}), "(el, 'p')", False, 'import markdown\n'), ((557, 8, 557, 48), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(557, 39, 557, 41): 'el', (557, 43, 557, 47): '"""hr"""'}, {}), "(el, 'hr')", False, 'import markdown\n'), ((558, 22, 558, 62), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(558, 53, 558, 55): 'el', (558, 57, 558, 61): 'None'}, {}), '(el, None)', False, 'import markdown\n'), ((560, 17, 560, 70), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(560, 48, 560, 59): 'non_element', (560, 61, 560, 69): '"""script"""'}, {}), "(non_element, 'script')", False, 'import markdown\n'), ((575, 13, 575, 47), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(575, 41, 575, 46): '"""div"""'}, {}), "('div')", False, 'import markdown\n'), ((577, 12, 577, 51), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(577, 43, 577, 45): 'el', (577, 47, 577, 50): '"""p"""'}, {}), "(el, 'p')", False, 'import markdown\n'), ((580, 8, 580, 48), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(580, 39, 580, 41): 'el', (580, 43, 580, 47): '"""hr"""'}, {}), "(el, 'hr')", False, 'import markdown\n'), ((581, 22, 581, 62), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(581, 53, 581, 55): 'el', (581, 57, 581, 61): 'None'}, {}), '(el, None)', False, 'import markdown\n'), ((583, 17, 583, 70), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(583, 48, 583, 59): 'non_element', (583, 61, 583, 69): '"""script"""'}, {}), "(non_element, 'script')", False, 'import markdown\n'), ((598, 13, 598, 53), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(598, 41, 598, 52): '"""MixedCase"""'}, {}), "('MixedCase')", False, 'import markdown\n'), ((600, 13, 600, 59), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(600, 44, 600, 46): 'el', (600, 48, 600, 58): '"""EMPHASIS"""'}, {}), "(el, 'EMPHASIS')", False, 'import markdown\n'), ((602, 8, 602, 48), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(602, 39, 602, 41): 'el', (602, 43, 602, 47): '"""HR"""'}, {}), "(el, 'HR')", False, 'import markdown\n'), ((610, 13, 610, 69), 'xml.etree.ElementTree.ProcessingInstruction', 'ProcessingInstruction', (), '', False, 'from xml.etree.ElementTree import ProcessingInstruction\n'), ((619, 14, 619, 48), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(619, 42, 619, 47): '"""div"""'}, 
{}), "('div')", False, 'import markdown\n'), ((620, 16, 620, 87), 'markdown.util.etree.QName', 'markdown.util.etree.QName', ({(620, 42, 620, 78): '"""http://www.w3.org/1998/Math/MathML"""', (620, 80, 620, 86): '"""math"""'}, {}), "('http://www.w3.org/1998/Math/MathML', 'math')", False, 'import markdown\n'), ((621, 15, 621, 57), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(621, 46, 621, 49): 'div', (621, 51, 621, 56): 'qname'}, {}), '(div, qname)', False, 'import markdown\n'), ((623, 14, 623, 63), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(623, 45, 623, 49): 'math', (623, 51, 623, 62): '"""semantics"""'}, {}), "(math, 'semantics')", False, 'import markdown\n'), ((624, 15, 624, 58), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(624, 46, 624, 49): 'sem', (624, 51, 624, 57): '"""msup"""'}, {}), "(sem, 'msup')", False, 'import markdown\n'), ((625, 13, 625, 55), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(625, 44, 625, 48): 'msup', (625, 50, 625, 54): '"""mi"""'}, {}), "(msup, 'mi')", False, 'import markdown\n'), ((627, 13, 627, 55), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(627, 44, 627, 48): 'msup', (627, 50, 627, 54): '"""mn"""'}, {}), "(msup, 'mn')", False, 'import markdown\n'), ((629, 14, 629, 64), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(629, 45, 629, 48): 'sem', (629, 50, 629, 63): '"""annotations"""'}, {}), "(sem, 'annotations')", False, 'import markdown\n'), ((648, 14, 648, 48), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(648, 42, 648, 47): '"""div"""'}, {}), "('div')", False, 'import markdown\n'), ((657, 16, 657, 79), 'markdown.util.etree.QName', 'markdown.util.etree.QName', ({(657, 42, 657, 78): '"""http://www.w3.org/1998/Math/MathML"""'}, {}), "('http://www.w3.org/1998/Math/MathML')", False, 'import markdown\n'), ((658, 13, 658, 47), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(658, 41, 658, 46): 'qname'}, {}), '(qname)', False, 'import markdown\n'), ((663, 16, 663, 71), 'markdown.util.etree.QName', 'markdown.util.etree.QName', ({(663, 42, 663, 63): '"""<&"test\nescaping">"""', (663, 65, 663, 70): '"""div"""'}, {}), '("""<&"test\nescaping">""", \'div\')', False, 'import markdown\n'), ((664, 13, 664, 47), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(664, 41, 664, 46): 'qname'}, {}), '(qname)', False, 'import markdown\n'), ((672, 16, 672, 84), 'markdown.util.etree.QName', 'markdown.util.etree.QName', ({(672, 42, 672, 76): '"""<&"test escaping">"""', (672, 78, 672, 83): '"""div"""'}, {}), '(\'<&"test escaping">\', \'div\')', False, 'import markdown\n'), ((673, 13, 673, 47), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(673, 41, 673, 46): 'qname'}, {}), '(qname)', False, 'import markdown\n'), ((716, 13, 716, 32), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((721, 15, 721, 49), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(721, 43, 721, 48): '"""div"""'}, {}), "('div')", False, 'import markdown\n'), ((722, 12, 722, 53), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(722, 43, 722, 47): 'tree', (722, 49, 722, 52): '"""p"""'}, {}), "(tree, 'p')", False, 'import markdown\n'), ((732, 15, 732, 49), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(732, 43, 732, 48): '"""div"""'}, {}), "('div')", False, 'import markdown\n'), ((733, 12, 733, 53), 
'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(733, 43, 733, 47): 'tree', (733, 49, 733, 52): '"""p"""'}, {}), "(tree, 'p')", False, 'import markdown\n'), ((734, 17, 734, 58), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(734, 44, 734, 57): '"""some *text*"""'}, {}), "('some *text*')", False, 'import markdown\n'), ((743, 15, 743, 49), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(743, 43, 743, 48): '"""div"""'}, {}), "('div')", False, 'import markdown\n'), ((744, 12, 744, 53), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(744, 43, 744, 47): 'tree', (744, 49, 744, 52): '"""p"""'}, {}), "(tree, 'p')", False, 'import markdown\n'), ((745, 17, 745, 54), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(745, 44, 745, 53): '"""*some* """'}, {}), "('*some* ')", False, 'import markdown\n'), ((746, 16, 746, 57), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(746, 47, 746, 48): 'p', (746, 50, 746, 56): '"""span"""'}, {}), "(p, 'span')", False, 'import markdown\n'), ((747, 21, 747, 58), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(747, 48, 747, 57): '"""*more* """'}, {}), "('*more* ')", False, 'import markdown\n'), ((748, 16, 748, 61), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(748, 47, 748, 52): 'span1', (748, 54, 748, 60): '"""span"""'}, {}), "(span1, 'span')", False, 'import markdown\n'), ((749, 21, 749, 58), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(749, 48, 749, 57): '"""*text* """'}, {}), "('*text* ')", False, 'import markdown\n'), ((750, 16, 750, 61), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', ({(750, 47, 750, 52): 'span2', (750, 54, 750, 60): '"""span"""'}, {}), "(span2, 'span')", False, 'import markdown\n'), ((751, 21, 751, 57), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(751, 48, 751, 56): '"""*here*"""'}, {}), "('*here*')", False, 'import markdown\n'), ((752, 21, 752, 56), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(752, 48, 752, 55): '""" *to*"""'}, {}), "(' *to*')", False, 'import markdown\n'), ((753, 21, 753, 58), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(753, 48, 753, 57): '""" *test*"""'}, {}), "(' *test*')", False, 'import markdown\n'), ((754, 21, 754, 58), 'markdown.util.AtomicString', 'markdown.util.AtomicString', ({(754, 48, 754, 57): '""" *with*"""'}, {}), "(' *with*')", False, 'import markdown\n'), ((799, 11, 799, 40), 'os.path.isfile', 'os.path.isfile', ({(799, 26, 799, 39): 'self.tempfile'}, {}), '(self.tempfile)', False, 'import os\n'), ((803, 33, 803, 50), 'markdown.__main__.parse_options', 'parse_options', ({(803, 47, 803, 49): '[]'}, {}), '([])', False, 'from markdown.__main__ import parse_options\n'), ((808, 33, 808, 54), 'markdown.__main__.parse_options', 'parse_options', ({(808, 47, 808, 53): "['-q']"}, {}), "(['-q'])", False, 'from markdown.__main__ import parse_options\n'), ((812, 33, 812, 54), 'markdown.__main__.parse_options', 'parse_options', ({(812, 47, 812, 53): "['-v']"}, {}), "(['-v'])", False, 'from markdown.__main__ import parse_options\n'), ((816, 33, 816, 59), 'markdown.__main__.parse_options', 'parse_options', ({(816, 47, 816, 58): "['--noisy']"}, {}), "(['--noisy'])", False, 'from markdown.__main__ import parse_options\n'), ((820, 33, 820, 59), 'markdown.__main__.parse_options', 'parse_options', ({(820, 47, 820, 58): "['foo.txt']"}, {}), "(['foo.txt'])", False, 'from markdown.__main__ 
import parse_options\n'), ((825, 33, 825, 66), 'markdown.__main__.parse_options', 'parse_options', ({(825, 47, 825, 65): "['-f', 'foo.html']"}, {}), "(['-f', 'foo.html'])", False, 'from markdown.__main__ import parse_options\n'), ((830, 33, 830, 77), 'markdown.__main__.parse_options', 'parse_options', ({(830, 47, 830, 76): "['-f', 'foo.html', 'foo.txt']"}, {}), "(['-f', 'foo.html', 'foo.txt'])", False, 'from markdown.__main__ import parse_options\n'), ((836, 33, 836, 63), 'markdown.__main__.parse_options', 'parse_options', ({(836, 47, 836, 62): "['-e', 'utf-8']"}, {}), "(['-e', 'utf-8'])", False, 'from markdown.__main__ import parse_options\n'), ((841, 33, 841, 62), 'markdown.__main__.parse_options', 'parse_options', ({(841, 47, 841, 61): "['-o', 'html']"}, {}), "(['-o', 'html'])", False, 'from markdown.__main__ import parse_options\n'), ((846, 33, 846, 54), 'markdown.__main__.parse_options', 'parse_options', ({(846, 47, 846, 53): "['-n']"}, {}), "(['-n'])", False, 'from markdown.__main__ import parse_options\n'), ((851, 33, 851, 87), 'markdown.__main__.parse_options', 'parse_options', ({(851, 47, 851, 86): "['-x', 'markdown.extensions.footnotes']"}, {}), "(['-x', 'markdown.extensions.footnotes'])", False, 'from markdown.__main__ import parse_options\n'), ((856, 33, 859, 10), 'markdown.__main__.parse_options', 'parse_options', ({(856, 47, 859, 9): "['-x', 'markdown.extensions.footnotes', '-x', 'markdown.extensions.smarty']"}, {}), "(['-x', 'markdown.extensions.footnotes', '-x',\n 'markdown.extensions.smarty'])", False, 'from markdown.__main__ import parse_options\n'), ((871, 28, 871, 52), 'tempfile.mkstemp', 'tempfile.mkstemp', ({(871, 45, 871, 51): '""".yml"""'}, {}), "('.yml')", False, 'import tempfile\n'), ((887, 33, 887, 69), 'markdown.__main__.parse_options', 'parse_options', ({(887, 47, 887, 68): "['-c', self.tempfile]"}, {}), "(['-c', self.tempfile])", False, 'from markdown.__main__ import parse_options\n'), ((900, 33, 900, 69), 'markdown.__main__.parse_options', 'parse_options', ({(900, 47, 900, 68): "['-c', self.tempfile]"}, {}), "(['-c', self.tempfile])", False, 'from markdown.__main__ import parse_options\n'), ((917, 33, 917, 69), 'markdown.__main__.parse_options', 'parse_options', ({(917, 47, 917, 68): "['-c', self.tempfile]"}, {}), "(['-c', self.tempfile])", False, 'from markdown.__main__ import parse_options\n'), ((938, 13, 938, 32), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((941, 14, 941, 33), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((950, 13, 950, 32), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((953, 14, 953, 33), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((101, 13, 101, 33), 'os.fdopen', 'os.fdopen', ({(101, 23, 101, 27): 'infd', (101, 29, 101, 32): '"""w"""'}, {}), "(infd, 'w')", False, 'import os\n'), ((109, 13, 109, 34), 'os.fdopen', 'os.fdopen', ({(109, 23, 109, 28): 'outfd', (109, 30, 109, 33): '"""r"""'}, {}), "(outfd, 'r')", False, 'import os\n'), ((131, 22, 131, 41), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((139, 12, 139, 54), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(139, 49, 139, 53): 'root'}, {}), '(root)', False, 'import markdown\n'), ((332, 13, 332, 49), 'warnings.catch_warnings', 'warnings.catch_warnings', (), '', False, 'import warnings\n'), ((333, 12, 333, 43), 'warnings.simplefilter', 
'warnings.simplefilter', ({(333, 34, 333, 42): '"""always"""'}, {}), "('always')", False, 'import warnings\n'), ((355, 13, 355, 49), 'warnings.catch_warnings', 'warnings.catch_warnings', (), '', False, 'import warnings\n'), ((356, 12, 356, 43), 'warnings.simplefilter', 'warnings.simplefilter', ({(356, 34, 356, 42): '"""always"""'}, {}), "('always')", False, 'import warnings\n'), ((403, 13, 403, 49), 'warnings.catch_warnings', 'warnings.catch_warnings', (), '', False, 'import warnings\n'), ((404, 12, 404, 43), 'warnings.simplefilter', 'warnings.simplefilter', ({(404, 34, 404, 42): '"""always"""'}, {}), "('always')", False, 'import warnings\n'), ((406, 16, 406, 40), 'markdown.util.Registry', 'markdown.util.Registry', ({}, {}), '()', False, 'import markdown\n'), ((519, 12, 519, 61), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', ({(519, 48, 519, 60): 'self.comment'}, {}), '(self.comment)', False, 'import markdown\n'), ((525, 63, 525, 82), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((528, 12, 528, 61), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', ({(528, 48, 528, 60): 'self.comment'}, {}), '(self.comment)', False, 'import markdown\n'), ((536, 68, 536, 87), 'markdown.Markdown', 'markdown.Markdown', ({}, {}), '()', False, 'import markdown\n'), ((564, 12, 564, 51), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', ({(564, 48, 564, 50): 'el'}, {}), '(el)', False, 'import markdown\n'), ((587, 12, 587, 52), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(587, 49, 587, 51): 'el'}, {}), '(el)', False, 'import markdown\n'), ((604, 12, 604, 52), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(604, 49, 604, 51): 'el'}, {}), '(el)', False, 'import markdown\n'), ((613, 12, 613, 52), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(613, 49, 613, 51): 'pi'}, {}), '(pi)', False, 'import markdown\n'), ((632, 12, 632, 53), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(632, 49, 632, 52): 'div'}, {}), '(div)', False, 'import markdown\n'), ((649, 16, 649, 48), 'markdown.util.etree.QName', 'markdown.util.etree.QName', ({(649, 42, 649, 47): '"""foo"""'}, {}), "('foo')", False, 'import markdown\n'), ((649, 50, 649, 82), 'markdown.util.etree.QName', 'markdown.util.etree.QName', ({(649, 76, 649, 81): '"""bar"""'}, {}), "('bar')", False, 'import markdown\n'), ((651, 12, 651, 53), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(651, 49, 651, 52): 'div'}, {}), '(div)', False, 'import markdown\n'), ((666, 12, 666, 52), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(666, 49, 666, 51): 'el'}, {}), '(el)', False, 'import markdown\n'), ((675, 12, 675, 52), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', ({(675, 49, 675, 51): 'el'}, {}), '(el)', False, 'import markdown\n'), ((701, 12, 701, 66), 'markdown.markdown', 'markdown.markdown', (), '', False, 'import markdown\n'), ((707, 12, 707, 65), 'markdown.markdown', 'markdown.markdown', (), '', False, 'import markdown\n'), ((726, 12, 726, 52), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', ({(726, 48, 726, 51): 'new'}, {}), '(new)', False, 'import markdown\n'), ((737, 12, 737, 52), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', ({(737, 48, 737, 
51): 'new'}, {}), '(new)', False, 'import markdown\n'), ((757, 12, 757, 52), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', ({(757, 48, 757, 51): 'new'}, {}), '(new)', False, 'import markdown\n'), ((765, 22, 765, 64), 'markdown.util.parseBoolValue', 'markdown.util.parseBoolValue', ({(765, 51, 765, 56): 'value', (765, 58, 765, 63): '(False)'}, {}), '(value, False)', False, 'import markdown\n'), ((776, 26, 776, 82), 'markdown.util.parseBoolValue', 'markdown.util.parseBoolValue', (), '', False, 'import markdown\n'), ((777, 26, 777, 80), 'markdown.util.parseBoolValue', 'markdown.util.parseBoolValue', (), '', False, 'import markdown\n'), ((800, 12, 800, 36), 'os.remove', 'os.remove', ({(800, 22, 800, 35): 'self.tempfile'}, {}), '(self.tempfile)', False, 'import os\n'), ((870, 21, 870, 38), 'yaml.dump', 'yaml.dump', ({(870, 31, 870, 37): 'config'}, {}), '(config)', False, 'import yaml\n'), ((872, 13, 872, 31), 'os.fdopen', 'os.fdopen', ({(872, 23, 872, 25): 'fd', (872, 27, 872, 30): '"""w"""'}, {}), "(fd, 'w')", False, 'import os\n'), ((916, 32, 916, 50), 'json.dumps', 'json.dumps', ({(916, 43, 916, 49): 'config'}, {}), '(config)', False, 'import json\n'), ((967, 17, 967, 54), 'markdown.util.etree.Element', 'markdown.util.etree.Element', ({(967, 45, 967, 53): 'self.tag'}, {}), '(self.tag)', False, 'import markdown\n'), ((1015, 13, 1015, 49), 'warnings.catch_warnings', 'warnings.catch_warnings', (), '', False, 'import warnings\n'), ((1017, 12, 1017, 43), 'warnings.simplefilter', 'warnings.simplefilter', ({(1017, 34, 1017, 42): '"""always"""'}, {}), "('always')", False, 'import warnings\n'), ((1028, 13, 1028, 49), 'warnings.catch_warnings', 'warnings.catch_warnings', (), '', False, 'import warnings\n'), ((1030, 12, 1030, 43), 'warnings.simplefilter', 'warnings.simplefilter', ({(1030, 34, 1030, 42): '"""always"""'}, {}), "('always')", False, 'import warnings\n'), ((72, 38, 72, 57), 'markdown.extensions.footnotes.FootnoteExtension', 'FootnoteExtension', ({}, {}), '()', False, 'from markdown.extensions.footnotes import FootnoteExtension\n'), ((123, 25, 123, 42), 'sys.stdout.read', 'sys.stdout.read', ({}, {}), '()', False, 'import sys\n'), ((484, 43, 484, 74), 'markdown.extensions.Extension', 'markdown.extensions.Extension', ({}, {}), '()', False, 'import markdown\n')] |
csxeba/nervous | nervous/utility/config.py | f7aeb9b2ff875835c346c607722fab517ef6df61 | import os
class StressedNetConfig:
def __init__(self,
synaptic_environmental_constraint=0.8,
group_environmental_constraint=0.6,
stress_factor=0.8,
save_folder=os.path.expanduser("~/.nervous/models/")):
self._synaptic_environmental_constraint = synaptic_environmental_constraint
self._group_environmental_constraint = group_environmental_constraint
self._stress_factor = stress_factor
self._save_folder = save_folder
self._sanitize()
def _sanitize(self):
        if not 0. <= self._group_environmental_constraint < 1.:
            raise ValueError("Group environmental constraint has to be in the range [0. - 1.)")
        if not 0. <= self._synaptic_environmental_constraint < 1.:
            raise ValueError("Synaptic environmental constraint has to be in the range [0. - 1.)")
        if not 0. <= self._stress_factor < 1.:
            raise ValueError("Stress factor has to be in the range [0. - 1.)")
if not os.path.exists(self._save_folder):
os.makedirs(self._save_folder)
@property
def synaptic_environmental_constraint(self):
return self._synaptic_environmental_constraint
@synaptic_environmental_constraint.setter
def synaptic_environmental_constraint(self, value):
self._synaptic_environmental_constraint = value
self._sanitize()
@property
def group_environmental_constraint(self):
return self._group_environmental_constraint
@group_environmental_constraint.setter
def group_environmental_constraint(self, value):
self._group_environmental_constraint = value
self._sanitize()
@property
def stress_factor(self):
return self._stress_factor
@stress_factor.setter
def stress_factor(self, value):
self._stress_factor = value
self._sanitize()
@property
def save_folder(self):
return self._save_folder
@save_folder.setter
def save_folder(self, value):
self._save_folder = value
self._sanitize()
def __getitem__(self, item):
if item == "self":
raise ValueError("Hahaha")
return self.__dict__[item]
| [((10, 29, 10, 69), 'os.path.expanduser', 'os.path.expanduser', ({(10, 48, 10, 68): '"""~/.nervous/models/"""'}, {}), "('~/.nervous/models/')", False, 'import os\n'), ((24, 15, 24, 48), 'os.path.exists', 'os.path.exists', ({(24, 30, 24, 47): 'self._save_folder'}, {}), '(self._save_folder)', False, 'import os\n'), ((25, 12, 25, 42), 'os.makedirs', 'os.makedirs', ({(25, 24, 25, 41): 'self._save_folder'}, {}), '(self._save_folder)', False, 'import os\n')] |
XinYao1994/mindspore | mindspore/nn/optim/ftrl.py | 2c1a2bf752a1fde311caddba22633d2f4f63cb4e | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FTRL"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.parameter import Parameter
from mindspore.common import Tensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer, apply_decay, grad_scale
ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")
@ftrl_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
"""Apply ftrl optimizer to the weight parameter."""
success = True
success = F.depend(success, opt(weight, moment, linear, gradient, learning_rate, l1, l2, lr_power))
return success
def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
prim_name=None):
"""Check param."""
validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
validator.check_number("learning_rate", learning_rate, 0.0, Rel.GT, prim_name)
validator.check_value_type("lr_power", lr_power, [float], prim_name)
validator.check_number("lr_power", lr_power, 0.0, Rel.LE, prim_name)
validator.check_value_type("l1", l1, [float], prim_name)
validator.check_number("l1", l1, 0.0, Rel.GE, prim_name)
validator.check_value_type("l2", l2, [float], prim_name)
validator.check_number("l2", l2, 0.0, Rel.GE, prim_name)
validator.check_value_type("use_locking", use_locking, [bool], prim_name)
validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)
validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)
class FTRL(Optimizer):
"""
Implement the FTRL algorithm with ApplyFtrl Operator.
FTRL is an online convex optimization algorithm that adaptively chooses its regularization function
based on the loss functions. Refer to paper `Adaptive Bound Optimization for Online Convex Optimization
<https://arxiv.org/abs/1002.4908>`_. Refer to paper `Ad Click Prediction: a View from the Trenches
<https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf>`_ for engineering document.
Args:
params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
should be Parameter.
initial_accum (float): The starting value for accumulators, must be zero or positive values. Default: 0.1.
learning_rate (float): The learning rate value, should be positive. Default: 0.001.
lr_power (float): Learning rate power controls how the learning rate decreases during training, must be less
than or equal to zero. Use fixed learning rate if lr_power is zero. Default: -0.5.
l1 (float): l1 regularization strength, must be greater than or equal to zero. Default: 0.0.
l2 (float): l2 regularization strength, must be greater than or equal to zero. Default: 0.0.
use_locking (bool): If True use locks for update operation. Default: False.
loss_scale (float): Value for the loss scale. It should be equal to or greater than 1.0. Default: 1.0.
        weight_decay (float): Weight decay value to multiply weight, must be zero or positive value. Default: 0.0.
Inputs:
- **grads** (tuple[Tensor]) - The gradients of `params` in optimizer, the shape is as same as the `params`
in optimizer.
Outputs:
tuple[Parameter], the updated parameters, the shape is the same as `params`.
Examples:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> opt = nn.FTRL(net.trainable_params())
>>> model = Model(net, loss_fn=loss, optimizer=opt, metrics=None)
"""
def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
use_locking=False, loss_scale=1.0, weight_decay=0.0):
super(FTRL, self).__init__(learning_rate, params)
_check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale, weight_decay,
self.cls_name)
self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
self.linear = self.parameters.clone(prefix="linear", init='zeros')
self.l1 = l1
self.l2 = l2
self.lr_power = lr_power
self.reciprocal_scale = 1.0 / loss_scale
self.weight_decay = weight_decay
self.decay_tf = tuple((lambda: True)() for x in self.parameters)
self.hyper_map = C.HyperMap()
self.opt = P.ApplyFtrl(use_locking=use_locking)
self.one = Tensor(1, mstype.int32)
def construct(self, grads):
params = self.parameters
moments = self.moments
linear = self.linear
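        # Optionally apply weight decay to the gradients and undo the loss scaling before the FTRL update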
if self.weight_decay > 0.0:
grads = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, grads)
if self.reciprocal_scale != 1.0:
grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
lr = self.learning_rate
success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power),
linear, grads, params, moments)
return success
| [((24, 11, 24, 43), 'mindspore.ops.composite.MultitypeFuncGraph', 'C.MultitypeFuncGraph', ({(24, 32, 24, 42): '"""ftrl_opt"""'}, {}), "('ftrl_opt')", True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((38, 4, 38, 82), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(38, 31, 38, 46): '"""initial_accum"""', (38, 48, 38, 61): 'initial_accum', (38, 63, 38, 70): '[float]', (38, 72, 38, 81): 'prim_name'}, {}), "('initial_accum', initial_accum, [float], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((39, 4, 39, 82), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', ({(39, 27, 39, 42): '"""initial_accum"""', (39, 44, 39, 57): 'initial_accum', (39, 59, 39, 62): '(0.0)', (39, 64, 39, 70): 'Rel.GE', (39, 72, 39, 81): 'prim_name'}, {}), "('initial_accum', initial_accum, 0.0, Rel.GE, prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((41, 4, 41, 82), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(41, 31, 41, 46): '"""learning_rate"""', (41, 48, 41, 61): 'learning_rate', (41, 63, 41, 70): '[float]', (41, 72, 41, 81): 'prim_name'}, {}), "('learning_rate', learning_rate, [float], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((42, 4, 42, 82), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', ({(42, 27, 42, 42): '"""learning_rate"""', (42, 44, 42, 57): 'learning_rate', (42, 59, 42, 62): '(0.0)', (42, 64, 42, 70): 'Rel.GT', (42, 72, 42, 81): 'prim_name'}, {}), "('learning_rate', learning_rate, 0.0, Rel.GT, prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((44, 4, 44, 72), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(44, 31, 44, 41): '"""lr_power"""', (44, 43, 44, 51): 'lr_power', (44, 53, 44, 60): '[float]', (44, 62, 44, 71): 'prim_name'}, {}), "('lr_power', lr_power, [float], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((45, 4, 45, 72), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', ({(45, 27, 45, 37): '"""lr_power"""', (45, 39, 45, 47): 'lr_power', (45, 49, 45, 52): '(0.0)', (45, 54, 45, 60): 'Rel.LE', (45, 62, 45, 71): 'prim_name'}, {}), "('lr_power', lr_power, 0.0, Rel.LE, prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((47, 4, 47, 60), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(47, 31, 47, 35): '"""l1"""', (47, 37, 47, 39): 'l1', (47, 41, 47, 48): '[float]', (47, 50, 47, 59): 'prim_name'}, {}), "('l1', l1, [float], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((48, 4, 48, 60), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', ({(48, 27, 48, 31): '"""l1"""', (48, 33, 48, 35): 'l1', (48, 37, 48, 40): '(0.0)', (48, 42, 48, 48): 'Rel.GE', (48, 50, 48, 59): 'prim_name'}, {}), "('l1', l1, 0.0, Rel.GE, prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((50, 4, 50, 60), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(50, 31, 50, 35): '"""l2"""', (50, 37, 50, 39): 'l2', (50, 41, 50, 48): '[float]', (50, 50, 50, 59): 'prim_name'}, {}), "('l2', l2, [float], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((51, 4, 51, 60), 'mindspore._checkparam.Validator.check_number', 
'validator.check_number', ({(51, 27, 51, 31): '"""l2"""', (51, 33, 51, 35): 'l2', (51, 37, 51, 40): '(0.0)', (51, 42, 51, 48): 'Rel.GE', (51, 50, 51, 59): 'prim_name'}, {}), "('l2', l2, 0.0, Rel.GE, prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((53, 4, 53, 77), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(53, 31, 53, 44): '"""use_locking"""', (53, 46, 53, 57): 'use_locking', (53, 59, 53, 65): '[bool]', (53, 67, 53, 76): 'prim_name'}, {}), "('use_locking', use_locking, [bool], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((55, 4, 55, 76), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(55, 31, 55, 43): '"""loss_scale"""', (55, 45, 55, 55): 'loss_scale', (55, 57, 55, 64): '[float]', (55, 66, 55, 75): 'prim_name'}, {}), "('loss_scale', loss_scale, [float], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((56, 4, 56, 76), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', ({(56, 27, 56, 39): '"""loss_scale"""', (56, 41, 56, 51): 'loss_scale', (56, 53, 56, 56): '(1.0)', (56, 58, 56, 64): 'Rel.GE', (56, 66, 56, 75): 'prim_name'}, {}), "('loss_scale', loss_scale, 1.0, Rel.GE, prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((58, 4, 58, 80), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', ({(58, 31, 58, 45): '"""weight_decay"""', (58, 47, 58, 59): 'weight_decay', (58, 61, 58, 68): '[float]', (58, 70, 58, 79): 'prim_name'}, {}), "('weight_decay', weight_decay, [float], prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((59, 4, 59, 80), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', ({(59, 27, 59, 41): '"""weight_decay"""', (59, 43, 59, 55): 'weight_decay', (59, 57, 59, 60): '(0.0)', (59, 62, 59, 68): 'Rel.GE', (59, 70, 59, 79): 'prim_name'}, {}), "('weight_decay', weight_decay, 0.0, Rel.GE, prim_name)", True, 'from mindspore._checkparam import Validator as validator\n'), ((111, 25, 111, 37), 'mindspore.ops.composite.HyperMap', 'C.HyperMap', ({}, {}), '()', True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((112, 19, 112, 55), 'mindspore.ops.operations.ApplyFtrl', 'P.ApplyFtrl', (), '', True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((113, 19, 113, 42), 'mindspore.common.Tensor', 'Tensor', ({(113, 26, 113, 27): '1', (113, 29, 113, 41): 'mstype.int32'}, {}), '(1, mstype.int32)', False, 'from mindspore.common import Tensor\n'), ((124, 33, 124, 99), 'mindspore.ops.functional.partial', 'F.partial', ({(124, 43, 124, 51): 'ftrl_opt', (124, 53, 124, 61): 'self.opt', (124, 63, 124, 65): 'lr', (124, 67, 124, 74): 'self.l1', (124, 76, 124, 83): 'self.l2', (124, 85, 124, 98): 'self.lr_power'}, {}), '(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power)', True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((120, 35, 120, 76), 'mindspore.ops.functional.partial', 'F.partial', ({(120, 45, 120, 56): 'apply_decay', (120, 58, 120, 75): 'self.weight_decay'}, {}), '(apply_decay, self.weight_decay)', True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((122, 35, 122, 79), 'mindspore.ops.functional.partial', 'F.partial', ({(122, 45, 122, 55): 'grad_scale', (122, 57, 122, 78): 'self.reciprocal_scale'}, {}), '(grad_scale, 
self.reciprocal_scale)', True, 'from mindspore.ops import functional as F, composite as C, operations as P\n')] |
skimhub/aws-utils | aws_utils/region_selector.py | 5496a7594ab90b1e658e8f9f8137e8943a39be1e | import datetime
import boto3
US_EAST_REGION = {'us-east-1'}
US_EAST_AVAILABILITY_ZONES = {'us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1e'} # note d is missing
INSTANCE_VERSION = 'Linux/UNIX (Amazon VPC)'
def fetch_spot_prices(region, start_time, end_time, instance_type, instance_version=INSTANCE_VERSION):
"""Fetches prices of EC2 spot instances from AWS.
Args:
region (str): region to look for instances in
start_time (datetime.datetime):
end_time (datetime.datetime):
instance_type (str):
instance_version (str): the types of instances that we wish to return prices for.
Returns:
        yield str, float: yields tuple of availability_zone and price over the period
Raises: ValueError,
raised in the event that the boto3 response is empty.
"""
conn = boto3.client('ec2', region_name=region)
res = conn.describe_spot_price_history(StartTime=start_time,
EndTime=end_time,
InstanceTypes=[instance_type],
ProductDescriptions=[instance_version])
for item in res['SpotPriceHistory']:
yield item['AvailabilityZone'], float(item['SpotPrice'])
token = res['NextToken']
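    # Keep requesting further pages until the API stops returning a non-empty NextToken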
while token:
res = conn.describe_spot_price_history(StartTime=start_time,
EndTime=end_time,
InstanceTypes=[instance_type],
ProductDescriptions=[instance_version],
NextToken=token)
for item in res['SpotPriceHistory']:
yield item['AvailabilityZone'], float(item['SpotPrice'])
token = res['NextToken']
def fetch_price_stats_per_availability_zone(region, start_time, end_time, instance_type, instance_version=INSTANCE_VERSION,
filter_availability_zones=None):
"""Groups raw prices by region, returns min, max and avg price.
Args:
region (str): region to look for instances in
start_time (datetime.datetime):
end_time (datetime.datetime):
instance_type (str):
instance_version (str): the types of instances that we wish to return prices for.
filter_availability_zones ({str}): if set then we only return a price if the availability zone is in this list
Returns: dict,
{'us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0}}
"""
by_zone = {}
for zone, price in fetch_spot_prices(region, start_time, end_time, instance_type, instance_version):
by_zone.setdefault(zone, []).append(price)
prices_per_region = {}
for zone, prices in by_zone.iteritems():
if filter_availability_zones is None or zone in filter_availability_zones:
region_prices = {'min': min(prices),
'max': max(prices),
'avg': sum(prices) / float(len(prices)),
'latest': prices[0]}
prices_per_region[zone] = region_prices
return prices_per_region
def get_cheapest_availability_zone(instance_type, search_regions=US_EAST_REGION,
filter_availability_zones=US_EAST_AVAILABILITY_ZONES, expected_job_length=datetime.timedelta(days=1)):
"""Get the cheapest availability zone from a set of regions. Cheapest is determined by 'latest price + average price'
over the duration that the job is expected to run for
Args:
filter_availability_zones ({str}): We only return results for zones in this set
instance_type (str): Type of aws instance e.g. "m2.4xlarge"
search_regions ({str}): Set of regions we want to look for availability zones in.
expected_job_length (datetime.timedelta): The period we expect the job to run this is used as the amount of time to look back over
for the average
Returns:
(str, {}) : e.g. ('us-east-1b': {'min': 2.01, 'max': 3.53,'avg':2.8, 'latest':3.0})
"""
if isinstance(search_regions, str):
search_regions = {search_regions}
aggregated_prices = {}
for region in search_regions:
result_stats = fetch_price_stats_per_availability_zone(region,
datetime.datetime.utcnow() - expected_job_length,
datetime.datetime.utcnow(),
instance_type,
filter_availability_zones=filter_availability_zones)
if not len(result_stats):
raise Exception("No valid avialability zones found for region %s" % (region,))
aggregated_prices.update(result_stats)
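    # Rank zones by latest price plus average price over the lookback window and keep the cheapest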
cheapest_availability_zone, stats = min(aggregated_prices.iteritems(), key=lambda x: x[1]['avg'] + x[1]['latest'])
return cheapest_availability_zone, stats
| [((28, 11, 28, 50), 'boto3.client', 'boto3.client', (), '', False, 'import boto3\n'), ((82, 109, 82, 135), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((103, 63, 103, 89), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n'), ((102, 63, 102, 89), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n')] |
jkae/knn-exercise | pynn/__init__.py | ae569e3f6a0e23669369d99e032270e72f8fbb66 |
from .nearest_neighbor_index import NearestNeighborIndex
from .kd_tree import *
| [] |
threecifanggen/python-functional-programming | tests/test_try.py | bd17281e5f24db826266f509bc54b25362c0d2a1 | '''
Author: huangbaochen<[email protected]>
Date: 2021-12-11 20:04:19
LastEditTime: 2021-12-11 21:46:16
LastEditors: huangbaochen<[email protected]>
Description: Tests for the Try monad
No MERCY
'''
import pytest
from fppy.try_monad import Try, Success, Fail
from fppy.option import Just, Nothing
@pytest.mark.try_monad
def test_try_apply():
assert Try.apply(1) == Success(1)
assert Try(1) == Success(1)
@pytest.mark.try_monad
def test_try_unapply():
assert Success.unapply(Success(1)) == Just(1)
assert Fail.unapply(Fail(TypeError(), 1)) == Nothing()
with pytest.raises(TypeError):
Fail.unapply(1)
with pytest.raises(TypeError):
Fail.unapply(Success(1))
with pytest.raises(TypeError):
Success.unapply(1)
with pytest.raises(TypeError):
Success.unapply(Fail(Exception(), 1))
def test_try_monad_map():
assert Success(1).map(lambda x: x + 1) == Success(2)
assert Success(1).map(lambda x: x / 0) ==\
Fail(ZeroDivisionError('division by zero'), 1)
assert Fail(ZeroDivisionError('division by zero'), 1)\
.map(lambda x: x + 1) ==\
Fail(ZeroDivisionError('division by zero'), 1)
@pytest.mark.try_monad
def test_try_monad_flat_map():
assert Success(1).flat_map(lambda x: Success(2)) == Success(2)
assert Fail(ZeroDivisionError('division by zero'), 1)\
.flat_map(lambda x: Success(1)) ==\
Fail(ZeroDivisionError('division by zero'), 1)
with pytest.raises(TypeError):
Success(1).flat_map(lambda x: x + 1)
@pytest.mark.try_monad
def test_try_monad_eq():
assert Fail(ZeroDivisionError('division by zero'), 1) ==\
Fail(ZeroDivisionError('division by zero'), 1)
assert Fail(ZeroDivisionError('division by'), 1) !=\
Fail(ZeroDivisionError('division by zero'), 1)
assert Fail(ZeroDivisionError('division by zero'), 0) !=\
Fail(ZeroDivisionError('division by zero'), 1)
@pytest.mark.try_monad
def test_try_monad_get():
assert Fail(ZeroDivisionError('division by zero'), 1)\
.get().args ==\
ZeroDivisionError('division by zero').args
assert Success(1).get() == 1
# pylint: disable=no-member
assert Try("s").get() == "s"
@pytest.mark.try_monad
def test_try_monad_get_or_else():
assert Fail(ZeroDivisionError('division by zero'), 1)\
.get_or_else(2) == 2
assert Success(1).get_or_else(2) == 1
@pytest.mark.try_monad
def test_try_monad_get_error_input():
assert Fail(ZeroDivisionError('division by zero'), 1)\
.get_error_input() == 1
| [((15, 11, 15, 23), 'fppy.try_monad.Try.apply', 'Try.apply', ({(15, 21, 15, 22): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((15, 27, 15, 37), 'fppy.try_monad.Success', 'Success', ({(15, 35, 15, 36): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((16, 11, 16, 17), 'fppy.try_monad.Try', 'Try', ({(16, 15, 16, 16): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((16, 21, 16, 31), 'fppy.try_monad.Success', 'Success', ({(16, 29, 16, 30): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((20, 42, 20, 49), 'fppy.option.Just', 'Just', ({(20, 47, 20, 48): '(1)'}, {}), '(1)', False, 'from fppy.option import Just, Nothing\n'), ((21, 49, 21, 58), 'fppy.option.Nothing', 'Nothing', ({}, {}), '()', False, 'from fppy.option import Just, Nothing\n'), ((23, 9, 23, 33), 'pytest.raises', 'pytest.raises', ({(23, 23, 23, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((24, 8, 24, 23), 'fppy.try_monad.Fail.unapply', 'Fail.unapply', ({(24, 21, 24, 22): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((26, 9, 26, 33), 'pytest.raises', 'pytest.raises', ({(26, 23, 26, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((29, 9, 29, 33), 'pytest.raises', 'pytest.raises', ({(29, 23, 29, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((30, 8, 30, 26), 'fppy.try_monad.Success.unapply', 'Success.unapply', ({(30, 24, 30, 25): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((32, 9, 32, 33), 'pytest.raises', 'pytest.raises', ({(32, 23, 32, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((36, 46, 36, 56), 'fppy.try_monad.Success', 'Success', ({(36, 54, 36, 55): '(2)'}, {}), '(2)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((45, 56, 45, 66), 'fppy.try_monad.Success', 'Success', ({(45, 64, 45, 65): '(2)'}, {}), '(2)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((50, 9, 50, 33), 'pytest.raises', 'pytest.raises', ({(50, 23, 50, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((20, 27, 20, 37), 'fppy.try_monad.Success', 'Success', ({(20, 35, 20, 36): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((27, 21, 27, 31), 'fppy.try_monad.Success', 'Success', ({(27, 29, 27, 30): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((36, 11, 36, 21), 'fppy.try_monad.Success', 'Success', ({(36, 19, 36, 20): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((37, 11, 37, 21), 'fppy.try_monad.Success', 'Success', ({(37, 19, 37, 20): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((45, 11, 45, 21), 'fppy.try_monad.Success', 'Success', ({(45, 19, 45, 20): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((45, 41, 45, 51), 'fppy.try_monad.Success', 'Success', ({(45, 49, 45, 50): '(2)'}, {}), '(2)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((47, 28, 47, 38), 'fppy.try_monad.Success', 'Success', ({(47, 36, 47, 37): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((51, 8, 51, 18), 'fppy.try_monad.Success', 'Success', ({(51, 16, 51, 17): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n'), ((70, 11, 70, 21), 'fppy.try_monad.Success', 'Success', ({(70, 19, 70, 20): '(1)'}, {}), '(1)', False, 'from fppy.try_monad 
import Try, Success, Fail\n'), ((73, 11, 73, 19), 'fppy.try_monad.Try', 'Try', ({(73, 15, 73, 18): '"""s"""'}, {}), "('s')", False, 'from fppy.try_monad import Try, Success, Fail\n'), ((80, 11, 80, 21), 'fppy.try_monad.Success', 'Success', ({(80, 19, 80, 20): '(1)'}, {}), '(1)', False, 'from fppy.try_monad import Try, Success, Fail\n')] |
yammesicka/calendar | app/internal/daily_quotes.py | 7c15a24883dbdffb563b6d3286c2d458e4a1c9c0 | from datetime import date
from typing import Dict, Optional
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import func
from app.database.models import Quote
TOTAL_DAYS = 366
def create_quote_object(quotes_fields: Dict[str, Optional[str]]) -> Quote:
"""This function create a quote object from given fields dictionary.
It is used for adding the data from the json into the db"""
return Quote(
text=quotes_fields['text'],
author=quotes_fields['author']
)
def quote_per_day(
session: Session, date: date = date.today()
) -> Optional[Quote]:
"""This function provides a daily quote, relevant to the current
    day of the year. The quote is randomly selected from a set
    of quotes matching the given day."""
day_num = date.timetuple().tm_yday
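    # Quotes are spread over the year by id: ids congruent to today's ordinal day (mod 366)
    # are candidates, and the query below picks one of them at random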
quote = session.query(Quote).filter(
Quote.id % TOTAL_DAYS == day_num).order_by(func.random()).first()
return quote
| [((15, 11, 18, 5), 'app.database.models.Quote', 'Quote', (), '', False, 'from app.database.models import Quote\n'), ((22, 39, 22, 51), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date\n'), ((27, 14, 27, 30), 'datetime.date.timetuple', 'date.timetuple', ({}, {}), '()', False, 'from datetime import date\n'), ((29, 51, 29, 64), 'sqlalchemy.sql.expression.func.random', 'func.random', ({}, {}), '()', False, 'from sqlalchemy.sql.expression import func\n')] |
viing937/codeforces | src/789A.py | d694eb6967cd56af02963c3a662066048cb78d07 | n, k = map(int, input().split())
w = list(map(int, input().split()))
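# (x + k - 1) // k is integer ceiling division, i.e. ceil(x / k); (r + 1) // 2 likewise rounds r / 2 up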
r = sum(map(lambda x: (x+k-1)//k, w))
print((r+1)//2)
| [] |
leyyin/godot | platform/server/detect.py | 68325d7254db711beaedddad218e2cddb405c42c |
import os
import sys
def is_active():
return True
def get_name():
return "Server"
def can_build():
if (os.name!="posix"):
return False
return True # enabled
def get_opts():
return [
('use_llvm','Use llvm compiler','no'),
('force_32_bits','Force 32 bits binary','no')
]
def get_flags():
return [
('builtin_zlib', 'no'),
]
def configure(env):
env.Append(CPPPATH=['#platform/server'])
if (env["use_llvm"]=="yes"):
env["CC"]="clang"
env["CXX"]="clang++"
env["LD"]="clang++"
if (env["colored"]=="yes"):
if sys.stdout.isatty():
env.Append(CXXFLAGS=["-fcolor-diagnostics"])
is64=sys.maxsize > 2**32
if (env["bits"]=="default"):
if (is64):
env["bits"]="64"
else:
env["bits"]="32"
#if (env["tools"]=="no"):
# #no tools suffix
# env['OBJSUFFIX'] = ".nt"+env['OBJSUFFIX']
# env['LIBSUFFIX'] = ".nt"+env['LIBSUFFIX']
if (env["target"]=="release"):
env.Append(CCFLAGS=['-O2','-ffast-math','-fomit-frame-pointer'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-ffast-math','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g2', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
env.Append(CPPFLAGS=['-DSERVER_ENABLED','-DUNIX_ENABLED'])
env.Append(LIBS=['pthread','z']) #TODO detect linux/BSD!
if (env["CXX"]=="clang++"):
env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
env["CC"]="clang"
env["LD"]="clang++"
| [((43, 6, 43, 25), 'sys.stdout.isatty', 'sys.stdout.isatty', ({}, {}), '()', False, 'import sys\n')] |
SnipsMine/ETS2-Speedrun-Tool | telemetry/Truck.py | 5ac87e4bc88be67ff4954b2b98772ff14a65eb48 | from telemetry.TruckConstants import ConstantValues
from telemetry.TruckCurrent import CurrentValues
from telemetry.TruckPositioning import Positioning
class TruckValues:
constant_values = None
current_values = None
positioning = None
def __init__(self):
self.current_values = CurrentValues()
self.constant_values = ConstantValues()
self.positioning = Positioning()
| [((13, 30, 13, 45), 'telemetry.TruckCurrent.CurrentValues', 'CurrentValues', ({}, {}), '()', False, 'from telemetry.TruckCurrent import CurrentValues\n'), ((14, 31, 14, 47), 'telemetry.TruckConstants.ConstantValues', 'ConstantValues', ({}, {}), '()', False, 'from telemetry.TruckConstants import ConstantValues\n'), ((15, 27, 15, 40), 'telemetry.TruckPositioning.Positioning', 'Positioning', ({}, {}), '()', False, 'from telemetry.TruckPositioning import Positioning\n')] |
petersontylerd/spark-courses | IntroToSpark/Assign4_Q1-6_action.py | e8dcb4968ea31a50206739e6af3006889f8c3c6c |
import csv
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.appName("Assignment4").getOrCreate()
sc = spark.sparkContext
# load data to dataframe
path = 'fake_data.csv'
df = spark.read.format('csv').option('header','true').load(path)
# cast income as an integer
df = df.withColumn('Income', df['Income'].cast(IntegerType()))
# Question 1
print('*' * 30)
print('\nQuestion 1\n')
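# Pair column 1 with column 0, count the distinct column-0 values per key, and take the key with the highest count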
print(df.rdd.map(lambda x: (x[1], x[0])).groupByKey().mapValues(lambda vals: len(set(vals))).sortBy(lambda a: a[1], ascending = False).take(1))
print('\n\n')
# Question 2
print('*' * 30)
print('\nQuestion 2\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').map(lambda x: (x[1], x[4])).groupByKey().mapValues(lambda x: sum(x) / len(x)).collect())
print('\n\n')
# Question 3
print('*' * 30)
print('\nQuestion 3\n')
print(df.rdd.filter(lambda v: v[4] > 100000).filter(lambda v: v[7] == 'FALSE').count())
print('\n\n')
# Question 4
print('*' * 30)
print('\nQuestion 4\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').sortBy(lambda x: x[4], ascending = False).map(lambda x: (x[3], x[6], x[4], x[5])).take(10))
print('\n\n')
# Question 5
print('*' * 30)
print('\nQuestion 5\n')
print(df.rdd.groupBy(lambda x: x[5]).count())
print('\n\n')
# Question 6
print('*' * 30)
print('\nQuestion 6\n')
print(df.rdd.filter(lambda v: v[5] == 'Writer').filter(lambda x: x[4] < 100000).count())
print('\n\n')
| [((6, 8, 6, 51), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', ({(6, 37, 6, 50): '"""Assignment4"""'}, {}), "('Assignment4')", False, 'from pyspark.sql import SparkSession\n'), ((14, 47, 14, 60), 'pyspark.sql.types.IntegerType', 'IntegerType', ({}, {}), '()', False, 'from pyspark.sql.types import IntegerType\n')] |
zipmex/fire | src/firebot/tests/factories.py | a41bbdbc86085c055ae4706fadea4f142e881a85 | import factory
from django.contrib.auth import get_user_model
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = get_user_model()
first_name = factory.Faker('name')
last_name = factory.Faker('name')
email = factory.Faker('email')
| [((10, 17, 10, 38), 'factory.Faker', 'factory.Faker', ({(10, 31, 10, 37): '"""name"""'}, {}), "('name')", False, 'import factory\n'), ((11, 16, 11, 37), 'factory.Faker', 'factory.Faker', ({(11, 30, 11, 36): '"""name"""'}, {}), "('name')", False, 'import factory\n'), ((12, 12, 12, 34), 'factory.Faker', 'factory.Faker', ({(12, 26, 12, 33): '"""email"""'}, {}), "('email')", False, 'import factory\n'), ((8, 16, 8, 32), 'django.contrib.auth.get_user_model', 'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import get_user_model\n')] |
Bestfast/reamberPy | reamber/o2jam/O2JHold.py | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | from dataclasses import dataclass, field
from reamber.base.Hold import Hold, HoldTail
from reamber.o2jam.O2JNoteMeta import O2JNoteMeta
@dataclass
class O2JHoldTail(HoldTail, O2JNoteMeta):
pass
@dataclass
class O2JHold(Hold, O2JNoteMeta):
""" Defines the O2Jam Bpm Object
The O2Jam Bpm Object is stored in binary file .ojn
"""
_tail: O2JHoldTail = field(init=False)
def _upcastTail(self, **kwargs) -> O2JHoldTail:
return O2JHoldTail(**kwargs)
| [((18, 25, 18, 42), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n')] |
Peacebot-Development/Peacebot-v2 | peacebot/core/plugins/Miscellaneous/__init__.py | 79ab87b12cd60b708631d96021ac3d3eaeee01c9 | import lightbulb
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from peacebot.core.utils.time import TimeConverter
def fetch_scheduler(ctx: lightbulb.Context) -> AsyncIOScheduler:
return ctx.bot.d.scheduler
async def convert_time(ctx: lightbulb.Context, time: str) -> float:
seconds = await TimeConverter.convert(TimeConverter, ctx, time)
return seconds
async def send_remainder(ctx: lightbulb.Context, text: str) -> None:
await ctx.respond(
f"{ctx.author.mention} Remainder: `{text}`",
user_mentions=True,
)
| [((12, 20, 12, 67), 'peacebot.core.utils.time.TimeConverter.convert', 'TimeConverter.convert', ({(12, 42, 12, 55): 'TimeConverter', (12, 57, 12, 60): 'ctx', (12, 62, 12, 66): 'time'}, {}), '(TimeConverter, ctx, time)', False, 'from peacebot.core.utils.time import TimeConverter\n')] |
laichimirum/docker-appium-emulator | example/android/python/msite_simple_default_browser.py | 3549c5f1fc09bbc650dd30351ad4f509a72a90fa | import unittest
from appium import webdriver
class MSiteDefaultBrowserAndroidUITests(unittest.TestCase):
def setUp(self):
# Default browser does not exist for android >= 6.0
desired_caps = {
'platformName': 'Android',
'deviceName': 'Android Emulator',
'appPackage': 'com.android.browser',
'appActivity': 'com.android.browser.BrowserActivity',
'avd': 'samsung_galaxy_s6_6.0'
}
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
def test_open_url(self):
self.driver.get('http://targeturl.com')
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(MSiteDefaultBrowserAndroidUITests)
unittest.TextTestRunner(verbosity=2).run(suite)
| [((18, 22, 18, 84), 'appium.webdriver.Remote', 'webdriver.Remote', ({(18, 39, 18, 69): '"""http://127.0.0.1:4723/wd/hub"""', (18, 71, 18, 83): 'desired_caps'}, {}), "('http://127.0.0.1:4723/wd/hub', desired_caps)", False, 'from appium import webdriver\n'), ((27, 12, 27, 33), 'unittest.TestLoader', 'unittest.TestLoader', ({}, {}), '()', False, 'import unittest\n'), ((28, 4, 28, 40), 'unittest.TextTestRunner', 'unittest.TextTestRunner', (), '', False, 'import unittest\n')] |
sola-st/Nalin | src/nn/dataset_utils/types_processing.py | 3a6f95cec95d9152a65af970cfbb145179b0bd72 | """
Created on 17-June-2020
@author Jibesh Patra
The types extracted during runtime usually look something like --> <class 'numpy.ndarray'> or
<class 'seaborn.palettes._ColorPalette'>; change them to --> ndarray, ColorPalette
"""
import re
remove_chars = re.compile(r'>|\'|<|(class )|_|(type)')
def process_types(tp: str) -> str:
cleaned_type = remove_chars.sub('', tp)
cleaned_type = cleaned_type.split('.')[-1].strip()
return cleaned_type
| [((12, 15, 12, 54), 're.compile', 're.compile', ({(12, 26, 12, 53): '""">|\\\\\'|<|(class )|_|(type)"""'}, {}), '(">|\\\\\'|<|(class )|_|(type)")', False, 'import re\n')] |
soootaleb/spare | src/canvas.py | b454b9a8861df55c29fe55b4b584248a2ffe79cb | from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.ticker as ticker
import numpy as np
import random, matplotlib.pyplot as plt
class PlotCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.plot()
def plot(self):
data = [random.random() for i in range(25)]
ax = self.figure.add_subplot(111)
ax.plot(data, 'r-')
ax.set_title('PyQt Matplotlib Example')
class ImageCanvas(FigureCanvas):
def __init__(self, parent = None, width = 5, height = 4, dpi=100):
fig = Figure(figsize = (width, height), dpi = dpi, frameon = False)
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
self.axes = fig.add_subplot(111)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def plot(self, image):
self.axes.axis('off')
display = image.image
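        # Map pixels equal to 1 to 255 so binary masks are visible when displayed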
display[display == 1] = 255
if image.color:
self.axes.imshow(image.image)
else :
self.axes.imshow(display, cmap = "gray")
self.show()
class HistogramCanvas(FigureCanvas):
'''
    This class is used to plot the histogram of the two objects in the main module.
    The values are computed in one of the descriptors.
'''
def __init__(self, parent = None, is_polar = True, width = 8, height = 5, dpi = 100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
FigureCanvas.__init__(self, self.fig)
self.is_polar = is_polar
self.setParent(parent)
if self.is_polar:
self.axes = self.fig.add_subplot(111, projection='polar')
else :
self.axes = self.fig.add_subplot(111)
self.axes.grid(True)
        #TODO : Add the names of the objects (fname - extension ?)
FigureCanvas.updateGeometry(self)
def plot(self, histogram, color = None):
self.axes.set_title("Spatial relations between A and B", va='bottom')
if self.is_polar:
self.axes.set_rlim(0,1)
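            # The histogram keys are angles in degrees; convert them to radians for the polar axes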
theta = [float(k)/ 180 * np.pi for k in histogram.values.keys()]
            #TODO : refactor this ugly-but-working code
if len(theta) > 16:
i = 0
theta_major_name = []
for k in histogram.values.keys():
if i % 3 == 0:
theta_major_name.append(float(k)/ 180 * np.pi)
i+=1
self.axes.xaxis.set_major_locator(ticker.FixedLocator(theta_major_name))
else :
self.axes.xaxis.set_major_locator(ticker.LinearLocator(len(theta)))
self.axes.xaxis.set_minor_locator(ticker.LinearLocator(len(theta)))
self.axes.grid(b = True, which='major', linestyle='-')
self.axes.grid(b = True, which='minor', linestyle='--')
self.axes.plot(theta, list(histogram.values.values()))
else:
self.axes.plot(list(histogram.values.keys()), list(histogram.values.values()))
# self.axes.plot(list(histogram.values.keys()), list(histogram.gaussian), color="red", ls='--')
self.draw()
def clear(self):
self.axes.clear()
def lin_or_polar(self, new_value : bool):
'''
set the type of the histogram to be polar or linear.
'''
self.is_polar = new_value
self.fig.clear()
if self.is_polar:
self.axes = self.fig.add_subplot(111, projection='polar')
else :
self.axes = self.fig.add_subplot(111)
FigureCanvas.updateGeometry(self)
| [((18, 14, 18, 54), 'matplotlib.figure.Figure', 'Figure', (), '', False, 'from matplotlib.figure import Figure\n'), ((21, 8, 21, 40), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', ({(21, 30, 21, 34): 'self', (21, 36, 21, 39): 'fig'}, {}), '(self, fig)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((24, 8, 24, 86), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy', 'FigureCanvas.setSizePolicy', ({(24, 35, 24, 39): 'self', (24, 41, 24, 62): 'QSizePolicy.Expanding', (24, 64, 24, 85): 'QSizePolicy.Expanding'}, {}), '(self, QSizePolicy.Expanding, QSizePolicy.Expanding)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((25, 8, 25, 41), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', ({(25, 36, 25, 40): 'self'}, {}), '(self)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((38, 14, 38, 75), 'matplotlib.figure.Figure', 'Figure', (), '', False, 'from matplotlib.figure import Figure\n'), ((40, 8, 40, 40), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', ({(40, 30, 40, 34): 'self', (40, 36, 40, 39): 'fig'}, {}), '(self, fig)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((46, 8, 46, 86), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy', 'FigureCanvas.setSizePolicy', ({(46, 35, 46, 39): 'self', (46, 41, 46, 62): 'QSizePolicy.Expanding', (46, 64, 46, 85): 'QSizePolicy.Expanding'}, {}), '(self, QSizePolicy.Expanding, QSizePolicy.Expanding)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((47, 8, 47, 41), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', ({(47, 36, 47, 40): 'self'}, {}), '(self)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((66, 19, 66, 59), 'matplotlib.figure.Figure', 'Figure', (), '', False, 'from matplotlib.figure import Figure\n'), ((67, 8, 67, 45), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', ({(67, 30, 67, 34): 'self', (67, 36, 67, 44): 'self.fig'}, {}), '(self, self.fig)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((78, 8, 78, 41), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', ({(78, 36, 78, 40): 'self'}, {}), '(self)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((120, 8, 120, 41), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', ({(120, 36, 120, 40): 'self'}, {}), '(self)', True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((30, 16, 30, 31), 'random.random', 'random.random', ({}, {}), '()', False, 'import random, matplotlib.pyplot as plt\n'), ((94, 50, 94, 87), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', ({(94, 70, 94, 86): 'theta_major_name'}, {}), '(theta_major_name)', True, 'import matplotlib.ticker as ticker\n')] |
GeorgBraun/cpg_scpi_python | src/cpg_scpi/test/__init__.py | ec74c15beaac0b002fb996a42f4e66ea369e1fc6 | '''Functional tests for CPG'''
from .. import CircuitPlayground
from .. import __version__ as CircuitPlaygroundVersion
import time
def funcTest(timestamps: bool = False) -> None:
cpg = CircuitPlayground()
if timestamps:
_printFuncTestHeadingWithDeliLine(f'cpg_scpi v{CircuitPlaygroundVersion}\nRUNNING SOME FUNCTIONAL-TESTS WITH THE CPG with timestamps ...\n')
else:
_printFuncTestHeadingWithDeliLine(f'cpg_scpi v{CircuitPlaygroundVersion}\nRUNNING SOME FUNCTIONAL-TESTS WITH THE CPG without timestamps ...\n')
# test_led(cpg)
# test_buttonAny(cpg, timestamps)
# test_switch(cpg, timestamps)
test_temp(cpg, timestamps)
test_light(cpg, timestamps)
test_acc(cpg, timestamps)
test_touch(cpg, timestamps)
_printFuncTestHeadingWithDeliLine('DONE WITH FUNCTIONAL-TESTS')
_printFuncTestDeliLine()
def _printCountdown(start: int = 3) -> None:
for i in range(start, 0, -1):
print(i, end=" ", flush=True)
time.sleep(1)
print('', flush=True)
def _printFuncTestDeliLine() -> None:
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
def _printFuncTestHeadingWithDeliLine(heading) -> None:
_printFuncTestDeliLine()
print(heading)
def test_buttonAny(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | any button |'
outFormat = '| {:5} | {:12.3f} | {!s:10} |'
else:
outHeading = '| count | any button |'
outFormat = '| {:5} | {!s:10} |'
_printFuncTestHeadingWithDeliLine('Button-Test: Press left or right button...')
print(outHeading)
_printCountdown(3)
count = 10
for i in range(count):
result = (count-i, *cpg.buttonAny_wts()) if timestamps else (count-i, cpg.buttonAny())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_switch(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | switch |'
outFormat = '| {:5} | {:12.3f} | {!s:6} |'
else:
outHeading = '| count | switch |'
outFormat = '| {:5} | {!s:6} |'
_printFuncTestHeadingWithDeliLine('Switch-Test: Change slider switch position...')
print(outHeading)
_printCountdown(3)
count = 10
for i in range(count):
result = (count-i, *cpg.switch_wts()) if timestamps else (count-i, cpg.switch())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_temp(cpg, timestamps) -> None:
if timestamps:
        outHeading = '| count | timestamp | temp °C |'
outFormat = '| {:5} | {:12.3f} | {:7.2f} |'
else:
        outHeading = '| count | temp °C |'
outFormat = '| {:5} | {:7.2f} |'
_printFuncTestHeadingWithDeliLine('Temp-Sensor-Test ...')
print(outHeading)
_printCountdown(3)
count = 20
for i in range(count):
result = (count-i, *cpg.temp_wts()) if timestamps else (count-i, cpg.temp())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_light(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | light |'
outFormat = '| {:5} | {:12.3f} | {:5} |'
else:
outHeading = '| count | light |'
outFormat = '| {:5} | {:5} |'
_printFuncTestHeadingWithDeliLine('Light-Sensor-Test: Move hand over light sensor...')
print(outHeading)
_printCountdown(3)
count = 20
for i in range(count):
result = (count-i, *cpg.light_wts()) if timestamps else (count-i, cpg.light())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_acc(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | x m/s^2 | y m/s^2 | z m/s^2 |'
outFormat = '| {:5} | {:12.3f} | {:7.2f} | {:7.2f} | {:7.2f} |'
testFunction = cpg.acc_wts
else:
outHeading = '| count | x m/s^2 | y m/s^2 | z m/s^2 |'
outFormat = '| {:5} | {:7.2f} | {:7.2f} | {:7.2f} |'
testFunction = cpg.acc
_printFuncTestHeadingWithDeliLine('Accelerometer-Test: Tilt the CPG board...')
print(outHeading)
_printCountdown(3)
count = 60
for i in range(count):
print(outFormat.format(count-i, *testFunction()))
cpg.wait(0.2)
def test_touch(cpg, timestamps) -> None:
if timestamps:
outHeading = '| count | timestamp | touch | binary |'
outFormat = '| {0:5} | {1:12.3f} | {2:5} | {2:08b} |'
else:
outHeading = '| count | touch | binary |'
outFormat = '| {0:5} | {1:5} | {1:08b} |'
_printFuncTestHeadingWithDeliLine('Touch-Sensor-Test: Touch capacitive sensor pads...')
print(outHeading)
_printCountdown(3)
count = 30
for i in range(count):
result = (count-i, *cpg.touch_wts()) if timestamps else (count-i, cpg.touch())
print(outFormat.format(*result))
cpg.wait(0.5)
def test_led(cpg) -> None:
'''Flash LEDs and run a short chasing light.'''
_printFuncTestHeadingWithDeliLine('LED-Test: Flash LEDs and run a short chasing light...')
print('flashing LEDs...')
test_ledDemo(cpg)
value=1
# print('| val | LEDs |')
for i in range(10):
# print(f'| {value:4} | {value:010b} |')
cpg.led(value)
cpg.wait(0.2)
value <<= 1 # shift 1 bit to the left
for i in range(10):
value >>= 1 # shift 1 bit to the right
# print(f'| {value:4} | {value:010b} |')
cpg.led(value)
cpg.wait(0.2)
print('flashing LEDs...')
test_ledDemo(cpg)
def test_ledDemo(cpg) -> None:
'''Flash LEDs three times.'''
for i in range(3):
cpg.ledDemo()
cpg.wait(0.2)
def testAccSpeed(cpg, iterations: int = 100) -> None:
'''Measure how long it takes to do an accelerometer measurement.'''
print(f'Testing acc measurement speed with {iterations} iterations. Please wait ...')
import timeit
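    # timeit calls cpg.acc() `iterations` times and returns the total elapsed time in seconds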
result = timeit.Timer(stmt=lambda: cpg.acc(), setup='pass').timeit(number=iterations)
print(f'Total time: {result:.1f} seconds.')
print(f'On average {(result*1000/iterations):.1f} ms per measurement.')
def testLightSpeed(cpg, iterations: int = 100) -> None:
'''Measure how long it takes to do a light sensor measurement.'''
print(f'Testing light measurement speed with {iterations} iterations. Please wait ...')
import timeit
result = timeit.Timer(stmt=lambda: cpg.light(), setup='pass').timeit(number=iterations)
print(f'Total time: {result:.1f} seconds.')
print(f'On average {(result*1000/iterations):.1f} ms per measurement.')
def _testResponseWaitTime(cpg, iterations: int = 10000) -> None:
    '''Test if the wait time for additional, unexpected responses is long enough.'''
print(f'Testing Response-Wait-Time with {iterations} iterations ...')
for i in range(iterations):
if i%100==0: print('try-count', i)
try:
# Request acc measurement values, but do not expect any response, even if the CPG will send one.
cpg._query('MEAS:ACC?', 0)
# If we are still here, we did not get a response. This is bad.
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
print('ERROR in testResponseWaitTime(): CPG-Response was too late.')
print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
except Exception:
# The normal behavior is a response, resulting in an exception.
# This is what we expected. Therefore, just continue.
pass
| [((29, 8, 29, 21), 'time.sleep', 'time.sleep', ({(29, 19, 29, 20): '(1)'}, {}), '(1)', False, 'import time\n')] |
yejun1060/SbjctSclctn | main/models.py | eca6a9d09cf81fce262ea58ca90e69ee5735ab16 | from django.db import models
class Account(models.Model):
clsNb = models.IntegerField()
Name = models.CharField(max_length=10)
pw = models.IntegerField()
def __str__(self):
return self.Name | [((5, 12, 5, 33), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((6, 11, 6, 42), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((7, 9, 7, 30), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n')] |
shrey-bansal/pytorch_geometric | test/utils/test_geodesic.py | 17108a08066b0a73530544d01719b186f2625ef2 | from math import sqrt
import torch
from torch_geometric.utils import geodesic_distance
def test_geodesic_distance():
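    # Four corners of a 2x2 square in the z=0 plane, triangulated into two faces that share the 0-3 diagonal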
pos = torch.Tensor([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]])
face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t()
out = geodesic_distance(pos, face)
expected = [
[0, 1, 1, sqrt(2)],
[1, 0, sqrt(2), 1],
[1, sqrt(2), 0, 1],
[sqrt(2), 1, 1, 0],
]
assert torch.allclose(out, torch.tensor(expected))
assert torch.allclose(out, geodesic_distance(pos, face, num_workers=-1))
out = geodesic_distance(pos, face, norm=False)
expected = [
[0, 2, 2, 2 * sqrt(2)],
[2, 0, 2 * sqrt(2), 2],
[2, 2 * sqrt(2), 0, 2],
[2 * sqrt(2), 2, 2, 0],
]
assert torch.allclose(out, torch.tensor(expected))
src = torch.tensor([0, 0, 0, 0])
dest = torch.tensor([0, 1, 2, 3])
out = geodesic_distance(pos, face, src=src, dest=dest)
expected = [0, 1, 1, sqrt(2)]
assert torch.allclose(out, torch.tensor(expected))
out = geodesic_distance(pos, face, src=src[0:1])
expected = [0, 1, 1, sqrt(2)]
assert torch.allclose(out, torch.tensor(expected))
out = geodesic_distance(pos, face, dest=dest)
expected = [0, 0, 0, 0]
assert torch.allclose(out, torch.Tensor(expected))
| [((8, 10, 8, 68), 'torch.Tensor', 'torch.Tensor', ({(8, 23, 8, 67): '[[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]]'}, {}), '([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]])', False, 'import torch\n'), ((11, 10, 11, 38), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', ({(11, 28, 11, 31): 'pos', (11, 33, 11, 37): 'face'}, {}), '(pos, face)', False, 'from torch_geometric.utils import geodesic_distance\n'), ((21, 10, 21, 50), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (), '', False, 'from torch_geometric.utils import geodesic_distance\n'), ((30, 10, 30, 36), 'torch.tensor', 'torch.tensor', ({(30, 23, 30, 35): '[0, 0, 0, 0]'}, {}), '([0, 0, 0, 0])', False, 'import torch\n'), ((31, 11, 31, 37), 'torch.tensor', 'torch.tensor', ({(31, 24, 31, 36): '[0, 1, 2, 3]'}, {}), '([0, 1, 2, 3])', False, 'import torch\n'), ((32, 10, 32, 58), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (), '', False, 'from torch_geometric.utils import geodesic_distance\n'), ((36, 10, 36, 52), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (), '', False, 'from torch_geometric.utils import geodesic_distance\n'), ((40, 10, 40, 49), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (), '', False, 'from torch_geometric.utils import geodesic_distance\n'), ((18, 31, 18, 53), 'torch.tensor', 'torch.tensor', ({(18, 44, 18, 52): 'expected'}, {}), '(expected)', False, 'import torch\n'), ((19, 31, 19, 75), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (), '', False, 'from torch_geometric.utils import geodesic_distance\n'), ((28, 31, 28, 53), 'torch.tensor', 'torch.tensor', ({(28, 44, 28, 52): 'expected'}, {}), '(expected)', False, 'import torch\n'), ((33, 25, 33, 32), 'math.sqrt', 'sqrt', ({(33, 30, 33, 31): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((34, 31, 34, 53), 'torch.tensor', 'torch.tensor', ({(34, 44, 34, 52): 'expected'}, {}), '(expected)', False, 'import torch\n'), ((37, 25, 37, 32), 'math.sqrt', 'sqrt', ({(37, 30, 37, 31): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((38, 31, 38, 53), 'torch.tensor', 'torch.tensor', ({(38, 44, 38, 52): 'expected'}, {}), '(expected)', False, 'import torch\n'), ((42, 31, 42, 53), 'torch.Tensor', 'torch.Tensor', ({(42, 44, 42, 52): 'expected'}, {}), '(expected)', False, 'import torch\n'), ((9, 11, 9, 47), 'torch.tensor', 'torch.tensor', ({(9, 24, 9, 46): '[[0, 1, 3], [0, 2, 3]]'}, {}), '([[0, 1, 3], [0, 2, 3]])', False, 'import torch\n'), ((13, 18, 13, 25), 'math.sqrt', 'sqrt', ({(13, 23, 13, 24): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((14, 15, 14, 22), 'math.sqrt', 'sqrt', ({(14, 20, 14, 21): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((15, 12, 15, 19), 'math.sqrt', 'sqrt', ({(15, 17, 15, 18): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((16, 9, 16, 16), 'math.sqrt', 'sqrt', ({(16, 14, 16, 15): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((23, 22, 23, 29), 'math.sqrt', 'sqrt', ({(23, 27, 23, 28): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((24, 19, 24, 26), 'math.sqrt', 'sqrt', ({(24, 24, 24, 25): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((25, 16, 25, 23), 'math.sqrt', 'sqrt', ({(25, 21, 25, 22): '(2)'}, {}), '(2)', False, 'from math import sqrt\n'), ((26, 13, 26, 20), 'math.sqrt', 'sqrt', ({(26, 18, 26, 19): '(2)'}, {}), '(2)', False, 'from math import sqrt\n')] |
naomielst/mongo-python-driver | gridfs/grid_file.py | e3d1d6f5b48101654a05493fd6eec7fe3fa014bd | # Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing files stored in GridFS."""
import datetime
import io
import math
import os
from bson.int64 import Int64
from bson.son import SON
from bson.binary import Binary
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.errors import (ConfigurationError,
CursorNotFound,
DuplicateKeyError,
InvalidOperation,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from gridfs.errors import CorruptGridFile, FileExists, NoFile
try:
_SEEK_SET = os.SEEK_SET
_SEEK_CUR = os.SEEK_CUR
_SEEK_END = os.SEEK_END
# os.SEEK_* constants were added in Python 2.5; fall back for older interpreters.
except AttributeError:
_SEEK_SET = 0
_SEEK_CUR = 1
_SEEK_END = 2
EMPTY = b""
NEWLN = b"\n"
"""Default chunk size, in bytes."""
# Slightly under a power of 2, to work well with server's record allocations.
DEFAULT_CHUNK_SIZE = 255 * 1024
_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)])
_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
def getter(self):
if closed_only and not self._closed:
raise AttributeError("can only get %r on a closed file" %
field_name)
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
def setter(self, value):
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {field_name: value}})
self._file[field_name] = value
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring)
def _grid_out_property(field_name, docstring):
"""Create a GridOut property."""
def getter(self):
self._ensure_file()
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
docstring += "\n\nThis attribute is read-only."
return property(getter, doc=docstring)
def _clear_entity_type_registry(entity, **kwargs):
"""Clear the given database/collection object's type registry."""
codecopts = entity.codec_options.with_options(type_registry=None)
return entity.with_options(codec_options=codecopts, **kwargs)
def _disallow_transactions(session):
if session and session.in_transaction:
raise InvalidOperation(
'GridFS does not support multi-document transactions')
class GridIn(object):
"""Class to write data to GridFS.
"""
def __init__(self, root_collection, session=None, **kwargs):
"""Write a file to GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Raises :class:`TypeError` if `root_collection` is not an
instance of :class:`~pymongo.collection.Collection`.
Any of the file level options specified in the `GridFS Spec
<http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
keyword arguments. Any additional keyword arguments will be
set as additional fields on the file document. Valid keyword
arguments include:
- ``"_id"``: unique ID for this file (default:
:class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
not have already been used for another file
- ``"filename"``: human name for the file
- ``"contentType"`` or ``"content_type"``: valid mime-type
for the file
- ``"chunkSize"`` or ``"chunk_size"``: size of each of the
chunks, in bytes (default: 255 kb)
- ``"encoding"``: encoding used for this file. Any :class:`str`
that is written to the file will be converted to :class:`bytes`.
:Parameters:
- `root_collection`: root collection to write to
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` to use for all
commands
- `**kwargs` (optional): file level options (see above)
.. versionchanged:: 4.0
Removed the `disable_md5` parameter. See
:ref:`removed-gridfs-checksum` for details.
.. versionchanged:: 3.7
Added the `disable_md5` parameter.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.0
`root_collection` must use an acknowledged
:attr:`~pymongo.collection.Collection.write_concern`
"""
if not isinstance(root_collection, Collection):
raise TypeError("root_collection must be an "
"instance of Collection")
if not root_collection.write_concern.acknowledged:
raise ConfigurationError('root_collection must use '
'acknowledged write_concern')
_disallow_transactions(session)
# Handle alternative naming
if "content_type" in kwargs:
kwargs["contentType"] = kwargs.pop("content_type")
if "chunk_size" in kwargs:
kwargs["chunkSize"] = kwargs.pop("chunk_size")
coll = _clear_entity_type_registry(
root_collection, read_preference=ReadPreference.PRIMARY)
# Defaults
kwargs["_id"] = kwargs.get("_id", ObjectId())
kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
object.__setattr__(self, "_session", session)
object.__setattr__(self, "_coll", coll)
object.__setattr__(self, "_chunks", coll.chunks)
object.__setattr__(self, "_file", kwargs)
object.__setattr__(self, "_buffer", io.BytesIO())
object.__setattr__(self, "_position", 0)
object.__setattr__(self, "_chunk_number", 0)
object.__setattr__(self, "_closed", False)
object.__setattr__(self, "_ensured_index", False)
def __create_index(self, collection, index_key, unique):
doc = collection.find_one(projection={"_id": 1}, session=self._session)
if doc is None:
try:
index_keys = [index_spec['key'] for index_spec in
collection.list_indexes(session=self._session)]
except OperationFailure:
index_keys = []
if index_key not in index_keys:
collection.create_index(
index_key.items(), unique=unique, session=self._session)
def __ensure_indexes(self):
if not object.__getattribute__(self, "_ensured_index"):
_disallow_transactions(self._session)
self.__create_index(self._coll.files, _F_INDEX, False)
self.__create_index(self._coll.chunks, _C_INDEX, True)
object.__setattr__(self, "_ensured_index", True)
def abort(self):
"""Remove all chunks/files that may have been uploaded and close.
"""
self._coll.chunks.delete_many(
{"files_id": self._file['_id']}, session=self._session)
self._coll.files.delete_one(
{"_id": self._file['_id']}, session=self._session)
object.__setattr__(self, "_closed", True)
@property
def closed(self):
"""Is this file closed?
"""
return self._closed
_id = _grid_in_property("_id", "The ``'_id'`` value for this file.",
read_only=True)
filename = _grid_in_property("filename", "Name of this file.")
name = _grid_in_property("filename", "Alias for `filename`.")
content_type = _grid_in_property("contentType", "Mime-type for this file.")
length = _grid_in_property("length", "Length (in bytes) of this file.",
closed_only=True)
chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.",
read_only=True)
upload_date = _grid_in_property("uploadDate",
"Date that this file was uploaded.",
closed_only=True)
md5 = _grid_in_property("md5", "MD5 of the contents of this file "
"if an md5 sum was created.",
closed_only=True)
def __getattr__(self, name):
if name in self._file:
return self._file[name]
raise AttributeError("GridIn object has no attribute '%s'" % name)
def __setattr__(self, name, value):
# For properties of this instance like _buffer, or descriptors set on
# the class like filename, use regular __setattr__
if name in self.__dict__ or name in self.__class__.__dict__:
object.__setattr__(self, name, value)
else:
# All other attributes are part of the document in db.fs.files.
# Store them to be sent to server on close() or if closed, send
# them now.
self._file[name] = value
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {name: value}})
def __flush_data(self, data):
"""Flush `data` to a chunk.
"""
self.__ensure_indexes()
if not data:
return
assert(len(data) <= self.chunk_size)
chunk = {"files_id": self._file["_id"],
"n": self._chunk_number,
"data": Binary(data)}
try:
self._chunks.insert_one(chunk, session=self._session)
except DuplicateKeyError:
self._raise_file_exists(self._file['_id'])
self._chunk_number += 1
self._position += len(data)
def __flush_buffer(self):
"""Flush the buffer contents out to a chunk.
"""
self.__flush_data(self._buffer.getvalue())
self._buffer.close()
self._buffer = io.BytesIO()
def __flush(self):
"""Flush the file to the database.
"""
try:
self.__flush_buffer()
# The GridFS spec says length SHOULD be an Int64.
self._file["length"] = Int64(self._position)
self._file["uploadDate"] = datetime.datetime.utcnow()
return self._coll.files.insert_one(
self._file, session=self._session)
except DuplicateKeyError:
self._raise_file_exists(self._id)
def _raise_file_exists(self, file_id):
"""Raise a FileExists exception for the given file_id."""
raise FileExists("file with _id %r already exists" % file_id)
def close(self):
"""Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed.
"""
if not self._closed:
self.__flush()
object.__setattr__(self, "_closed", True)
def read(self, size=-1):
raise io.UnsupportedOperation('read')
def readable(self):
return False
def seekable(self):
return False
def write(self, data):
"""Write data to the file. There is no return value.
`data` can be either a string of bytes or a file-like object
(implementing :meth:`read`). If the file has an
:attr:`encoding` attribute, `data` can also be a
:class:`str` instance, which will be encoded as
:attr:`encoding` before being written.
Due to buffering, the data may not actually be written to the
database until the :meth:`close` method is called. Raises
:class:`ValueError` if this file is already closed. Raises
:class:`TypeError` if `data` is not an instance of
:class:`bytes`, a file-like object, or an instance of :class:`str`.
Unicode data is only allowed if the file has an :attr:`encoding`
attribute.
:Parameters:
- `data`: string of bytes or file-like object to be written
to the file
"""
if self._closed:
raise ValueError("cannot write to a closed file")
try:
# file-like
read = data.read
except AttributeError:
# string
if not isinstance(data, (str, bytes)):
raise TypeError("can only write strings or file-like objects")
if isinstance(data, str):
try:
data = data.encode(self.encoding)
except AttributeError:
raise TypeError("must specify an encoding for file in "
"order to write str")
read = io.BytesIO(data).read
if self._buffer.tell() > 0:
# Make sure to flush only when _buffer is complete
space = self.chunk_size - self._buffer.tell()
if space:
try:
to_write = read(space)
except:
self.abort()
raise
self._buffer.write(to_write)
if len(to_write) < space:
return # EOF or incomplete
self.__flush_buffer()
to_write = read(self.chunk_size)
while to_write and len(to_write) == self.chunk_size:
self.__flush_data(to_write)
to_write = read(self.chunk_size)
self._buffer.write(to_write)
def writelines(self, sequence):
"""Write a sequence of strings to the file.
        Does not add separators.
"""
for line in sequence:
self.write(line)
def writeable(self):
return True
def __enter__(self):
"""Support for the context manager protocol.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Support for the context manager protocol.
Close the file and allow exceptions to propagate.
"""
self.close()
# propagate exceptions
return False
class GridOut(io.IOBase):
"""Class to read data out of GridFS.
"""
def __init__(self, root_collection, file_id=None, file_document=None,
session=None):
"""Read a file from GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Either `file_id` or `file_document` must be specified,
`file_document` will be given priority if present. Raises
:class:`TypeError` if `root_collection` is not an instance of
:class:`~pymongo.collection.Collection`.
:Parameters:
- `root_collection`: root collection to read from
- `file_id` (optional): value of ``"_id"`` for the file to read
- `file_document` (optional): file document from
`root_collection.files`
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` to use for all
commands
.. versionchanged:: 3.8
For better performance and to better follow the GridFS spec,
:class:`GridOut` now uses a single cursor to read all the chunks in
the file.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.0
Creating a GridOut does not immediately retrieve the file metadata
from the server. Metadata is fetched when first needed.
"""
if not isinstance(root_collection, Collection):
raise TypeError("root_collection must be an "
"instance of Collection")
_disallow_transactions(session)
root_collection = _clear_entity_type_registry(root_collection)
super().__init__()
self.__chunks = root_collection.chunks
self.__files = root_collection.files
self.__file_id = file_id
self.__buffer = EMPTY
self.__chunk_iter = None
self.__position = 0
self._file = file_document
self._session = session
_id = _grid_out_property("_id", "The ``'_id'`` value for this file.")
filename = _grid_out_property("filename", "Name of this file.")
name = _grid_out_property("filename", "Alias for `filename`.")
content_type = _grid_out_property("contentType", "Mime-type for this file.")
length = _grid_out_property("length", "Length (in bytes) of this file.")
chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.")
upload_date = _grid_out_property("uploadDate",
"Date that this file was first uploaded.")
aliases = _grid_out_property("aliases", "List of aliases for this file.")
metadata = _grid_out_property("metadata", "Metadata attached to this file.")
md5 = _grid_out_property("md5", "MD5 of the contents of this file "
"if an md5 sum was created.")
def _ensure_file(self):
if not self._file:
_disallow_transactions(self._session)
self._file = self.__files.find_one({"_id": self.__file_id},
session=self._session)
if not self._file:
raise NoFile("no file in gridfs collection %r with _id %r" %
(self.__files, self.__file_id))
def __getattr__(self, name):
self._ensure_file()
if name in self._file:
return self._file[name]
raise AttributeError("GridOut object has no attribute '%s'" % name)
def readable(self):
return True
def readchunk(self):
"""Reads a chunk at a time. If the current position is within a
chunk the remainder of the chunk is returned.
"""
received = len(self.__buffer)
chunk_data = EMPTY
chunk_size = int(self.chunk_size)
if received > 0:
chunk_data = self.__buffer
elif self.__position < int(self.length):
chunk_number = int((received + self.__position) / chunk_size)
if self.__chunk_iter is None:
self.__chunk_iter = _GridOutChunkIterator(
self, self.__chunks, self._session, chunk_number)
chunk = self.__chunk_iter.next()
chunk_data = chunk["data"][self.__position % chunk_size:]
if not chunk_data:
raise CorruptGridFile("truncated chunk")
self.__position += len(chunk_data)
self.__buffer = EMPTY
return chunk_data
def read(self, size=-1):
"""Read at most `size` bytes from the file (less if there
isn't enough data).
        The bytes are returned as an instance of :class:`bytes`. If `size` is
        negative or omitted all data is read.
:Parameters:
- `size` (optional): the number of bytes to read
.. versionchanged:: 3.8
This method now only checks for extra chunks after reading the
entire file. Previously, this method would check for extra chunks
on every call.
"""
self._ensure_file()
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
received += len(chunk_data)
data.write(chunk_data)
# Detect extra chunks after reading the entire file.
if size == remainder and self.__chunk_iter:
try:
self.__chunk_iter.next()
except StopIteration:
pass
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def readline(self, size=-1):
"""Read one line or up to `size` bytes from the file.
:Parameters:
- `size` (optional): the maximum number of bytes to read
"""
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
pos = chunk_data.find(NEWLN, 0, size)
if pos != -1:
size = received + pos + 1
received += len(chunk_data)
data.write(chunk_data)
if pos != -1:
break
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def tell(self):
"""Return the current position of this file.
"""
return self.__position
def seek(self, pos, whence=_SEEK_SET):
"""Set the current position of this file.
:Parameters:
- `pos`: the position (or offset if using relative
positioning) to seek to
- `whence` (optional): where to seek
from. :attr:`os.SEEK_SET` (``0``) for absolute file
positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
to the current position, :attr:`os.SEEK_END` (``2``) to
seek relative to the file's end.
"""
if whence == _SEEK_SET:
new_pos = pos
elif whence == _SEEK_CUR:
new_pos = self.__position + pos
elif whence == _SEEK_END:
new_pos = int(self.length) + pos
else:
raise IOError(22, "Invalid value for `whence`")
if new_pos < 0:
raise IOError(22, "Invalid value for `pos` - must be positive")
# Optimization, continue using the same buffer and chunk iterator.
if new_pos == self.__position:
return
self.__position = new_pos
self.__buffer = EMPTY
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
def seekable(self):
return True
def __iter__(self):
"""Return an iterator over all of this file's data.
The iterator will return lines (delimited by ``b'\\n'``) of
:class:`bytes`. This can be useful when serving files
using a webserver that handles such an iterator efficiently.
.. versionchanged:: 3.8
The iterator now raises :class:`CorruptGridFile` when encountering
any truncated, missing, or extra chunk in a file. The previous
behavior was to only raise :class:`CorruptGridFile` on a missing
chunk.
.. versionchanged:: 4.0
The iterator now iterates over *lines* in the file, instead
of chunks, to conform to the base class :py:class:`io.IOBase`.
Use :meth:`GridOut.readchunk` to read chunk by chunk instead
of line by line.
"""
return self
def close(self):
"""Make GridOut more generically file-like."""
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
super().close()
def write(self, value):
raise io.UnsupportedOperation('write')
def writelines(self, lines):
raise io.UnsupportedOperation('writelines')
def writable(self):
return False
def __enter__(self):
"""Makes it possible to use :class:`GridOut` files
with the context manager protocol.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Makes it possible to use :class:`GridOut` files
with the context manager protocol.
"""
self.close()
return False
def fileno(self):
raise io.UnsupportedOperation('fileno')
def flush(self):
# GridOut is read-only, so flush does nothing.
pass
def isatty(self):
return False
def truncate(self, size=None):
# See https://docs.python.org/3/library/io.html#io.IOBase.writable
# for why truncate has to raise.
raise io.UnsupportedOperation('truncate')
# Override IOBase.__del__ otherwise it will lead to __getattr__ on
# __IOBase_closed which calls _ensure_file and potentially performs I/O.
# We cannot do I/O in __del__ since it can lead to a deadlock.
def __del__(self):
pass
class _GridOutChunkIterator(object):
"""Iterates over a file's chunks using a single cursor.
Raises CorruptGridFile when encountering any truncated, missing, or extra
chunk in a file.
"""
def __init__(self, grid_out, chunks, session, next_chunk):
self._id = grid_out._id
self._chunk_size = int(grid_out.chunk_size)
self._length = int(grid_out.length)
self._chunks = chunks
self._session = session
self._next_chunk = next_chunk
self._num_chunks = math.ceil(float(self._length) / self._chunk_size)
self._cursor = None
def expected_chunk_length(self, chunk_n):
if chunk_n < self._num_chunks - 1:
return self._chunk_size
return self._length - (self._chunk_size * (self._num_chunks - 1))
def __iter__(self):
return self
def _create_cursor(self):
filter = {"files_id": self._id}
if self._next_chunk > 0:
filter["n"] = {"$gte": self._next_chunk}
_disallow_transactions(self._session)
self._cursor = self._chunks.find(filter, sort=[("n", 1)],
session=self._session)
def _next_with_retry(self):
"""Return the next chunk and retry once on CursorNotFound.
We retry on CursorNotFound to maintain backwards compatibility in
cases where two calls to read occur more than 10 minutes apart (the
server's default cursor timeout).
"""
if self._cursor is None:
self._create_cursor()
try:
return self._cursor.next()
except CursorNotFound:
self._cursor.close()
self._create_cursor()
return self._cursor.next()
def next(self):
try:
chunk = self._next_with_retry()
except StopIteration:
if self._next_chunk >= self._num_chunks:
raise
raise CorruptGridFile("no chunk #%d" % self._next_chunk)
if chunk["n"] != self._next_chunk:
self.close()
raise CorruptGridFile(
"Missing chunk: expected chunk #%d but found "
"chunk with n=%d" % (self._next_chunk, chunk["n"]))
if chunk["n"] >= self._num_chunks:
# According to spec, ignore extra chunks if they are empty.
if len(chunk["data"]):
self.close()
raise CorruptGridFile(
"Extra chunk found: expected %d chunks but found "
"chunk with n=%d" % (self._num_chunks, chunk["n"]))
expected_length = self.expected_chunk_length(chunk["n"])
if len(chunk["data"]) != expected_length:
self.close()
raise CorruptGridFile(
"truncated chunk #%d: expected chunk length to be %d but "
"found chunk with length %d" % (
chunk["n"], expected_length, len(chunk["data"])))
self._next_chunk += 1
return chunk
__next__ = next
def close(self):
if self._cursor:
self._cursor.close()
self._cursor = None
class GridOutIterator(object):
def __init__(self, grid_out, chunks, session):
self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0)
def __iter__(self):
return self
def next(self):
chunk = self.__chunk_iter.next()
return bytes(chunk["data"])
__next__ = next
class GridOutCursor(Cursor):
"""A cursor / iterator for returning GridOut objects as the result
of an arbitrary query against the GridFS files collection.
"""
def __init__(self, collection, filter=None, skip=0, limit=0,
no_cursor_timeout=False, sort=None, batch_size=0,
session=None):
"""Create a new cursor, similar to the normal
:class:`~pymongo.cursor.Cursor`.
Should not be called directly by application developers - see
the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.
        .. versionadded:: 2.7
.. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
"""
_disallow_transactions(session)
collection = _clear_entity_type_registry(collection)
# Hold on to the base "fs" collection to create GridOut objects later.
self.__root_collection = collection
super(GridOutCursor, self).__init__(
collection.files, filter, skip=skip, limit=limit,
no_cursor_timeout=no_cursor_timeout, sort=sort,
batch_size=batch_size, session=session)
def next(self):
"""Get next GridOut object from cursor.
"""
_disallow_transactions(self.session)
# Work around "super is not iterable" issue in Python 3.x
next_file = super(GridOutCursor, self).next()
return GridOut(self.__root_collection, file_document=next_file,
session=self.session)
__next__ = next
def add_option(self, *args, **kwargs):
raise NotImplementedError("Method does not exist for GridOutCursor")
def remove_option(self, *args, **kwargs):
raise NotImplementedError("Method does not exist for GridOutCursor")
def _clone_base(self, session):
"""Creates an empty GridOutCursor for information to be copied into.
"""
return GridOutCursor(self.__root_collection, session=session)
| [((54, 11, 54, 59), 'bson.son.SON', 'SON', ({(54, 15, 54, 58): "[('files_id', ASCENDING), ('n', ASCENDING)]"}, {}), "([('files_id', ASCENDING), ('n', ASCENDING)])", False, 'from bson.son import SON\n'), ((55, 11, 55, 68), 'bson.son.SON', 'SON', ({(55, 15, 55, 67): "[('filename', ASCENDING), ('uploadDate', ASCENDING)]"}, {}), "([('filename', ASCENDING), ('uploadDate', ASCENDING)])", False, 'from bson.son import SON\n'), ((110, 14, 111, 66), 'pymongo.errors.InvalidOperation', 'InvalidOperation', ({(111, 12, 111, 65): '"""GridFS does not support multi-document transactions"""'}, {}), "('GridFS does not support multi-document transactions')", False, 'from pymongo.errors import ConfigurationError, CursorNotFound, DuplicateKeyError, InvalidOperation, OperationFailure\n'), ((293, 23, 293, 35), 'io.BytesIO', 'io.BytesIO', ({}, {}), '()', False, 'import io\n'), ((311, 14, 311, 69), 'gridfs.errors.FileExists', 'FileExists', ({(311, 25, 311, 68): "('file with _id %r already exists' % file_id)"}, {}), "('file with _id %r already exists' % file_id)", False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((324, 14, 324, 45), 'io.UnsupportedOperation', 'io.UnsupportedOperation', ({(324, 38, 324, 44): '"""read"""'}, {}), "('read')", False, 'import io\n'), ((554, 15, 554, 27), 'io.BytesIO', 'io.BytesIO', ({}, {}), '()', False, 'import io\n'), ((589, 15, 589, 27), 'io.BytesIO', 'io.BytesIO', ({}, {}), '()', False, 'import io\n'), ((680, 14, 680, 46), 'io.UnsupportedOperation', 'io.UnsupportedOperation', ({(680, 38, 680, 45): '"""write"""'}, {}), "('write')", False, 'import io\n'), ((683, 14, 683, 51), 'io.UnsupportedOperation', 'io.UnsupportedOperation', ({(683, 38, 683, 50): '"""writelines"""'}, {}), "('writelines')", False, 'import io\n'), ((702, 14, 702, 47), 'io.UnsupportedOperation', 'io.UnsupportedOperation', ({(702, 38, 702, 46): '"""fileno"""'}, {}), "('fileno')", False, 'import io\n'), ((714, 14, 714, 49), 'io.UnsupportedOperation', 'io.UnsupportedOperation', ({(714, 38, 714, 48): '"""truncate"""'}, {}), "('truncate')", False, 'import io\n'), ((174, 18, 175, 66), 'pymongo.errors.ConfigurationError', 'ConfigurationError', ({(174, 37, 175, 65): '"""root_collection must use acknowledged write_concern"""'}, {}), "('root_collection must use acknowledged write_concern')", False, 'from pymongo.errors import ConfigurationError, CursorNotFound, DuplicateKeyError, InvalidOperation, OperationFailure\n'), ((188, 42, 188, 52), 'bson.objectid.ObjectId', 'ObjectId', ({}, {}), '()', False, 'from bson.objectid import ObjectId\n'), ((194, 44, 194, 56), 'io.BytesIO', 'io.BytesIO', ({}, {}), '()', False, 'import io\n'), ((279, 25, 279, 37), 'bson.binary.Binary', 'Binary', ({(279, 32, 279, 36): 'data'}, {}), '(data)', False, 'from bson.binary import Binary\n'), ((301, 35, 301, 56), 'bson.int64.Int64', 'Int64', ({(301, 41, 301, 55): 'self._position'}, {}), '(self._position)', False, 'from bson.int64 import Int64\n'), ((302, 39, 302, 65), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n'), ((782, 18, 784, 67), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', ({(783, 16, 784, 66): "('Missing chunk: expected chunk #%d but found chunk with n=%d' % (self.\n _next_chunk, chunk['n']))"}, {}), "(\n 'Missing chunk: expected chunk #%d but found chunk with n=%d' % (self.\n _next_chunk, chunk['n']))", False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((491, 22, 492, 60), 'gridfs.errors.NoFile', 'NoFile', ({(491, 29, 492, 59): "('no 
file in gridfs collection %r with _id %r' % (self.__files, self.__file_id)\n )"}, {}), "('no file in gridfs collection %r with _id %r' % (self.__files, self.\n __file_id))", False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((778, 18, 778, 68), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', ({(778, 34, 778, 67): "('no chunk #%d' % self._next_chunk)"}, {}), "('no chunk #%d' % self._next_chunk)", False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((790, 22, 792, 71), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', ({(791, 20, 792, 70): "('Extra chunk found: expected %d chunks but found chunk with n=%d' % (self.\n _num_chunks, chunk['n']))"}, {}), "(\n 'Extra chunk found: expected %d chunks but found chunk with n=%d' % (\n self._num_chunks, chunk['n']))", False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((369, 19, 369, 35), 'io.BytesIO', 'io.BytesIO', ({(369, 30, 369, 34): 'data'}, {}), '(data)', False, 'import io\n'), ((523, 22, 523, 56), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', ({(523, 38, 523, 55): '"""truncated chunk"""'}, {}), "('truncated chunk')", False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n')] |
tcl326/forte | forte/processors/tests/stanfordnlp_processor_test.py | d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | """This module tests Stanford NLP processors."""
import os
import unittest
from texar.torch import HParams
from forte.pipeline import Pipeline
from forte.data.readers import StringReader
from forte.processors.stanfordnlp_processor import StandfordNLPProcessor
from ft.onto.base_ontology import Token, Sentence
class TestStanfordNLPProcessor(unittest.TestCase):
def setUp(self):
self.stanford_nlp = Pipeline()
self.stanford_nlp.set_reader(StringReader())
models_path = os.getcwd()
config = HParams({
"processors": "tokenize",
"lang": "en",
# Language code for the language to build the Pipeline
"use_gpu": False
}, StandfordNLPProcessor.default_hparams())
self.stanford_nlp.add_processor(StandfordNLPProcessor(models_path),
config=config)
self.stanford_nlp.initialize()
# TODO
@unittest.skip("We need to test this without needing to download models "
"everytime")
def test_stanford_processor(self):
sentences = ["This tool is called Forte.",
"The goal of this project to help you build NLP "
"pipelines.",
"NLP has never been made this easy before."]
document = ' '.join(sentences)
pack = self.stanford_nlp.process(document)
print(pack)
| [((29, 5, 30, 31), 'unittest.skip', 'unittest.skip', ({(29, 19, 30, 30): '"""We need to test this without needing to download models everytime"""'}, {}), "(\n 'We need to test this without needing to download models everytime')", False, 'import unittest\n'), ((15, 28, 15, 38), 'forte.pipeline.Pipeline', 'Pipeline', ({}, {}), '()', False, 'from forte.pipeline import Pipeline\n'), ((17, 22, 17, 33), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((16, 37, 16, 51), 'forte.data.readers.StringReader', 'StringReader', ({}, {}), '()', False, 'from forte.data.readers import StringReader\n'), ((23, 11, 23, 50), 'forte.processors.stanfordnlp_processor.StandfordNLPProcessor.default_hparams', 'StandfordNLPProcessor.default_hparams', ({}, {}), '()', False, 'from forte.processors.stanfordnlp_processor import StandfordNLPProcessor\n'), ((24, 40, 24, 74), 'forte.processors.stanfordnlp_processor.StandfordNLPProcessor', 'StandfordNLPProcessor', ({(24, 62, 24, 73): 'models_path'}, {}), '(models_path)', False, 'from forte.processors.stanfordnlp_processor import StandfordNLPProcessor\n')] |
eventh/m3u8looper | src/serve_files.py | 9c4ae166e9af4679cf64b19e3c3efc7bbdaed5a5 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Serve the files in the current folder over HTTP.
"""
import socketserver
from threading import Thread
from http.server import SimpleHTTPRequestHandler
PORT = 8000
def start_http_server(port=PORT):
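    # Serves files from the current working directory on the given port in a
    # background thread. Note that only the thread is returned, not the TCPServer
    # instance, so the server cannot be shut down explicitly by the caller and
    # runs until the process exits.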
httpd = socketserver.TCPServer(("", port), SimpleHTTPRequestHandler)
thread = Thread(target = httpd.serve_forever)
thread.start()
return thread
if __name__ == '__main__':
thread = start_http_server()
thread.join()
| [((15, 12, 15, 72), 'socketserver.TCPServer', 'socketserver.TCPServer', ({(15, 35, 15, 45): "('', port)", (15, 47, 15, 71): 'SimpleHTTPRequestHandler'}, {}), "(('', port), SimpleHTTPRequestHandler)", False, 'import socketserver\n'), ((16, 13, 16, 49), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n')] |
yxzoro/pypy | pypy/module/__builtin__/test/test_compile.py | 6e47b3d3e5513d9639a21554963a6ace172ccfee | # coding: utf-8
class AppTestCompile:
def test_simple(self):
import sys
co = compile('1+2', '?', 'eval')
assert eval(co) == 3
co = compile(memoryview(b'1+2'), '?', 'eval')
assert eval(co) == 3
exc = raises(ValueError, compile, chr(0), '?', 'eval')
assert str(exc.value) == "source code string cannot contain null bytes"
compile("from __future__ import with_statement", "<test>", "exec")
raises(SyntaxError, compile, '-', '?', 'eval')
raises(SyntaxError, compile, '"\\xt"', '?', 'eval')
raises(ValueError, compile, '1+2', '?', 'maybenot')
raises(ValueError, compile, "\n", "<string>", "exec", 0xff)
raises(TypeError, compile, '1+2', 12, 34)
def test_error_message(self):
import re
compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
exc = raises(SyntaxError, compile,
b'# -*- coding: fake -*-\n', 'dummy', 'exec')
assert 'fake' in str(exc.value)
exc = raises(SyntaxError, compile,
b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
assert 'iso-8859-15' in str(exc.value)
assert 'BOM' in str(exc.value)
exc = raises(SyntaxError, compile,
b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
assert 'fake' in str(exc.value)
assert 'BOM' in str(exc.value)
def test_unicode(self):
try:
compile(u'-', '?', 'eval')
except SyntaxError as e:
assert e.lineno == 1
def test_unicode_encoding(self):
code = "# -*- coding: utf-8 -*-\npass\n"
compile(code, "tmp", "exec")
def test_bytes(self):
code = b"# -*- coding: utf-8 -*-\npass\n"
compile(code, "tmp", "exec")
c = compile(b"# coding: latin1\nfoo = 'caf\xe9'\n", "<string>", "exec")
ns = {}
exec(c, ns)
assert ns['foo'] == 'café'
assert eval(b"# coding: latin1\n'caf\xe9'\n") == 'café'
def test_memoryview(self):
m = memoryview(b'2 + 1')
co = compile(m, 'baz', 'eval')
assert eval(co) == 3
assert eval(m) == 3
ns = {}
exec(memoryview(b'r = 2 + 1'), ns)
assert ns['r'] == 3
def test_recompile_ast(self):
import _ast
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
raises(TypeError, compile, co1, '<ast>', 'eval')
co2 = compile('1+1', '<string>', 'eval', _ast.PyCF_ONLY_AST)
tree = compile(co2, '<ast>', 'eval')
assert compile(co2, '<ast>', 'eval', _ast.PyCF_ONLY_AST) is co2
def test_leading_newlines(self):
src = """
def fn(): pass
"""
co = compile(src, 'mymod', 'exec')
firstlineno = co.co_firstlineno
assert firstlineno == 2
def test_null_bytes(self):
raises(ValueError, compile, '\x00', 'mymod', 'exec', 0)
src = "#abc\x00def\n"
raises(ValueError, compile, src, 'mymod', 'exec')
raises(ValueError, compile, src, 'mymod', 'exec', 0)
def test_null_bytes_flag(self):
try:
from _ast import PyCF_ACCEPT_NULL_BYTES
except ImportError:
skip('PyPy only (requires _ast.PyCF_ACCEPT_NULL_BYTES)')
raises(SyntaxError, compile, '\x00', 'mymod', 'exec',
PyCF_ACCEPT_NULL_BYTES)
src = "#abc\x00def\n"
compile(src, 'mymod', 'exec', PyCF_ACCEPT_NULL_BYTES) # works
def test_compile_regression(self):
"""Clone of the part of the original test that was failing."""
import ast
codestr = '''def f():
"""doc"""
try:
assert False
except AssertionError:
return (True, f.__doc__)
else:
return (False, f.__doc__)
'''
def f(): """doc"""
values = [(-1, __debug__, f.__doc__),
(0, True, 'doc'),
(1, False, 'doc'),
(2, False, None)]
for optval, debugval, docstring in values:
# test both direct compilation and compilation via AST
codeobjs = []
codeobjs.append(
compile(codestr, "<test>", "exec", optimize=optval))
tree = ast.parse(codestr)
codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
for i, code in enumerate(codeobjs):
print(optval, debugval, docstring, i)
ns = {}
exec(code, ns)
rv = ns['f']()
assert rv == (debugval, docstring)
def test_assert_remove(self):
"""Test removal of the asserts with optimize=1."""
import ast
code = """def f():
assert False
"""
tree = ast.parse(code)
for to_compile in [code, tree]:
compiled = compile(to_compile, "<test>", "exec", optimize=1)
ns = {}
exec(compiled, ns)
ns['f']()
def test_docstring_remove(self):
"""Test removal of docstrings with optimize=2."""
import ast
import marshal
code = """
'module_doc'
def f():
'func_doc'
class C:
'class_doc'
"""
tree = ast.parse(code)
for to_compile in [code, tree]:
compiled = compile(to_compile, "<test>", "exec", optimize=2)
ns = {}
exec(compiled, ns)
assert '__doc__' not in ns
assert ns['f'].__doc__ is None
assert ns['C'].__doc__ is None
# Check that the docstrings are gone from the bytecode and not just
# inaccessible.
marshalled = str(marshal.dumps(compiled))
assert 'module_doc' not in marshalled
assert 'func_doc' not in marshalled
assert 'class_doc' not in marshalled
class TestOptimizeO:
"""Test interaction of -O flag and optimize parameter of compile."""
def setup_method(self, method):
space = self.space
self._sys_debug = space.sys.debug
# imitate -O
space.sys.debug = False
def teardown_method(self, method):
self.space.sys.debug = self._sys_debug
    def test_O_optimize_0(self):
"""Test that assert is not ignored if -O flag is set but optimize=0."""
space = self.space
w_res = space.appexec([], """():
assert False # check that our -O imitation hack works
try:
exec(compile('assert False', '', 'exec', optimize=0))
except AssertionError:
return True
else:
return False
""")
assert space.unwrap(w_res)
def test_O_optimize__1(self):
"""Test that assert is ignored with -O and optimize=-1."""
space = self.space
space.appexec([], """():
exec(compile('assert False', '', 'exec', optimize=-1))
""")
# TODO: Check the value of __debug__ inside of the compiled block!
# According to the documentation, it should follow the optimize flag.
# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows
# -O, -OO flags of the interpreter).
| [((139, 15, 139, 30), 'ast.parse', 'ast.parse', ({(139, 25, 139, 29): 'code'}, {}), '(code)', False, 'import ast\n'), ((160, 15, 160, 30), 'ast.parse', 'ast.parse', ({(160, 25, 160, 29): 'code'}, {}), '(code)', False, 'import ast\n'), ((122, 19, 122, 37), 'ast.parse', 'ast.parse', ({(122, 29, 122, 36): 'codestr'}, {}), '(codestr)', False, 'import ast\n'), ((172, 29, 172, 52), 'marshal.dumps', 'marshal.dumps', ({(172, 43, 172, 51): 'compiled'}, {}), '(compiled)', False, 'import marshal\n')] |
huangbrandon432/Investing-Trading-Tool | tickers_graphing_module.py | 370015b906b7ee90c0fb48ca69865ac7428b3917 |
import yfinance as yf
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
from IPython.display import Markdown
import numpy as np
from datetime import date, timedelta
def plot_and_get_info(ticker, start = None, end = None, ma = 'yes'):
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if start and end:
start_date, end_date = start, end
else:
start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
frame = ticker_hist.loc[start_date:end_date]
closing_prices = frame['Close']
volume = frame['Volume']
fig = make_subplots(rows=2, cols=1,
shared_xaxes=True,
vertical_spacing=0.03, row_heights = [0.8, 0.2])
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'), row = 1, col = 1)
if ma == 'yes':
closing_prices_ma = frame['Close'].rolling(7).mean()
fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = '7D Close Moving Average'), row = 1, col = 1)
fig.add_trace(go.Bar(x = closing_prices.index, y = volume, name = 'Volume'), row=2, col=1)
fig.update_xaxes(rangeslider_visible = True, rangeslider_thickness = 0.1, row=2, col=1)
fig.update_yaxes(title_text="Price", row=1, col=1)
fig.update_layout(title=ticker, height = 600,
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=3,
label="3m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date"
)
)
fig.show()
start_price, end_price = frame.iloc[0]['Close'], frame.iloc[-1]['Close']
def printmd(string):
display(Markdown(string))
printmd('Given Timeframe:')
printmd("Return: {:.2f}%".format((end_price - start_price)/start_price*100))
try:
ticker_info = ticker_obj.info
print()
printmd('Business Summary: ' + ticker_info['longBusinessSummary'])
market_cap = str(round(ticker_info['marketCap']/1000000000,2)) + 'B'
longname = ticker_info['longName']
sector = ticker_info['sector']
industry = ticker_info['industry']
country = ticker_info['country']
avg10d_vol = str(round(ticker_info['averageDailyVolume10Day']/1000000,2)) + 'M'
most_recent_vol = str(round(ticker_info['volume']/1000000,2)) + 'M'
try:
beta = round(ticker_info['beta'],2)
except:
beta = ticker_info['beta']
try:
ps_trailing_12mo = round(ticker_info['priceToSalesTrailing12Months'],2)
except:
ps_trailing_12mo = ticker_info['priceToSalesTrailing12Months']
try:
forwardpe = round(ticker_info['forwardPE'],2)
except:
forwardpe = ticker_info['forwardPE']
pegratio = ticker_info['pegRatio']
forwardeps = ticker_info['forwardEps']
trailingeps = ticker_info['trailingEps']
shares_outstanding = str(round(ticker_info['sharesOutstanding']/1000000,2)) + 'M'
shares_short = str(round(ticker_info['sharesShort']/1000000,2)) + 'M'
shares_short_perc_outstanding = str(round(ticker_info['sharesPercentSharesOut']*100,2)) + '%'
floatshares = str(round(ticker_info['floatShares']/1000000,2)) + 'M'
try:
short_perc_float = str(round(ticker_info['shortPercentOfFloat']*100,2)) + '%'
except:
short_perc_float = ticker_info['shortPercentOfFloat']
perc_institutions = str(round(ticker_info['heldPercentInstitutions']*100,2)) + '%'
perc_insiders = str(round(ticker_info['heldPercentInsiders']*100,2)) + '%'
stock_info = [market_cap, longname, sector, industry, country, beta, most_recent_vol, avg10d_vol, ps_trailing_12mo, forwardpe, pegratio, forwardeps, trailingeps,
shares_outstanding, perc_institutions, perc_insiders, shares_short, shares_short_perc_outstanding, floatshares, short_perc_float]
stock_info_df = pd.DataFrame(stock_info, index = ['Market Cap', 'Name', 'Sector', 'Industry', 'Country', 'Beta', 'Day Volume (Most recent)',
'Avg 10D Volume', 'P/S Trailing 12mo', 'Forward P/E', 'PEG Ratio', 'Forward EPS',
'Trailing EPS', 'Shares Outstanding', 'Institutions % of Oustanding',
'Insiders % of Oustanding', 'Shares Short (Prev Mo)', 'Short % of Outstanding (Prev Mo)',
'Shares Float', 'Short % of Float (Prev Mo)'], columns = ['Info'])
print()
display(stock_info_df)
except:
pass
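# Example call (sketch; requires network access for yfinance and a plotly-enabled
# environment such as a Jupyter notebook; the ticker and dates are illustrative):
#     plot_and_get_info('AAPL', start='2021-01-01', end='2021-06-30', ma='yes')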
def compare_charts(tickers = [], start = None, end = None, ma = 'yes'):
if len(tickers) <= 1:
raise Exception("Please enter at least two tickers to compare")
def normalize_data(column):
min = column.min()
max = column.max()
# time series normalization
# y will be a column in a dataframe
y = (column - min) / (max - min)
return y
def printmd(string):
display(Markdown(string))
start_end_prices = {}
closing_90_days = []
fig = go.Figure()
for ticker in tickers:
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if start and end:
start_date, end_date = start, end
else:
start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
frame = ticker_hist.loc[start_date:end_date].copy()
frame['Norm Close'] = normalize_data(frame['Close'])
closing_prices = frame['Norm Close']
start_end_prices[ticker] = {'start_price': frame.iloc[0]['Close'], 'end_price': frame.iloc[-1]['Close']}
closing_90_days.append(closing_prices.iloc[-90:].to_frame().rename(columns = {'Norm Close': ticker}))
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = ticker + ' Norm Close'))
if ma == 'yes':
closing_prices_ma = frame['Norm Close'].rolling(7).mean()
fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = ticker + '7D Close Moving Average'))
fig.update_layout(title = ', '.join(tickers) + ' Comparison', yaxis_title = 'Norm Price')
fig.update_layout(height = 600,
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=3,
label="3m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
rangeslider=dict(
visible=True, thickness = 0.1
),
type="date"
)
)
fig.show()
printmd('Given Timeframe:')
for ticker in tickers:
start_price, end_price = start_end_prices[ticker]['start_price'], start_end_prices[ticker]['end_price']
printmd(ticker + " Return: {:.2f}%".format((end_price - start_price)/start_price*100))
if len(tickers) > 2:
concat_closing_90_days = pd.concat(closing_90_days, axis = 1)
print('\n')
printmd("Last 90 Days Close Pearson Correlation Matrix: ")
display(concat_closing_90_days.corr())
fig2 = px.imshow(concat_closing_90_days.corr(), color_continuous_scale = 'blues', title = 'Last 90 Days Close Pearson Correlation Heatmap',
width = 500, height = 400)
fig2.show()
else:
fig2 = go.Figure()
fig2.add_trace(go.Scatter(x = closing_90_days[0].loc[:, tickers[0]], y = closing_90_days[1].loc[:, tickers[1]], mode = 'markers', name = 'Norm Close'))
fig2.update_layout(title = ', '.join(tickers) + ' Last 90 Days Correlation', xaxis_title = tickers[0], yaxis_title = tickers[1], width = 1000, height = 500)
fig2.show()
printmd("Pearson Correlation: " + str(round(closing_90_days[0].loc[:, tickers[0]].corr(closing_90_days[1].loc[:, tickers[1]]),3)))
print()
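# Example call (sketch; at least two tickers are required, dates are illustrative):
#     compare_charts(['AAPL', 'MSFT', 'GOOG'], start='2021-01-01', end='2021-06-30')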
def plot_buysell_points(ticker, tradesdf, crypto = 'no'):
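    # Plots the ticker's closing prices and annotates each trade from `tradesdf`,
    # which is expected to contain the columns 'Symbol', 'Date', 'Avg_Price',
    # 'Quantity', 'Total', 'Side', 'Gain' and '% Gain' (these are the fields the
    # function reads). Pass crypto='yes' to append '-USD' to the symbol for yfinance.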
trade_history = tradesdf[tradesdf['Symbol'] == ticker].reset_index(drop=True)
if crypto == 'yes':
ticker += '-USD'
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if len(ticker_hist) == 0:
return
start_date = (pd.to_datetime(trade_history.loc[0, 'Date']) - timedelta(150)).strftime("%Y-%m-%d")
today_date = date.today().strftime("%Y-%m-%d")
frame = ticker_hist.loc[start_date:today_date]
closing_prices = frame['Close']
fig = go.Figure()
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'))
for i in range(len(trade_history)):
trade_date = trade_history.loc[i, 'Date']
price = trade_history.loc[i, 'Avg_Price']
quantity = trade_history.loc[i, 'Quantity']
total = trade_history.loc[i, 'Total']
side = trade_history.loc[i, 'Side']
gain = trade_history.loc[i, 'Gain']
perc_gain = trade_history.loc[i, '% Gain']
if side == 'buy':
fig.add_annotation(x = trade_date, y = price, text = f'BB', showarrow = True, arrowhead = 1,
ax = -0.5, ay = -30, arrowsize = 1.5, align = 'left',
hovertext = f'B, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}')
if side == 'sell':
fig.add_annotation(x = trade_date, y = price, text = f'SS', showarrow = True, arrowhead = 1,
ax = 20, ay = -30, arrowsize = 1.5, align = 'right',
hovertext = f'S, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}, G: {gain}, %G: {perc_gain}')
fig.update_layout(title = ticker, yaxis_title = 'Price')
fig.show()
| [((15, 17, 15, 34), 'yfinance.Ticker', 'yf.Ticker', ({(15, 27, 15, 33): 'ticker'}, {}), '(ticker)', True, 'import yfinance as yf\n'), ((30, 10, 32, 68), 'plotly.subplots.make_subplots', 'make_subplots', (), '', False, 'from plotly.subplots import make_subplots\n'), ((186, 10, 186, 21), 'plotly.graph_objects.Figure', 'go.Figure', ({}, {}), '()', True, 'import plotly.graph_objects as go\n'), ((303, 17, 303, 34), 'yfinance.Ticker', 'yf.Ticker', ({(303, 27, 303, 33): 'ticker'}, {}), '(ticker)', True, 'import yfinance as yf\n'), ((316, 10, 316, 21), 'plotly.graph_objects.Figure', 'go.Figure', ({}, {}), '()', True, 'import plotly.graph_objects as go\n'), ((35, 18, 35, 106), 'plotly.graph_objects.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objects as go\n'), ((42, 18, 42, 79), 'plotly.graph_objects.Bar', 'go.Bar', (), '', True, 'import plotly.graph_objects as go\n'), ((148, 24, 152, 127), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((191, 21, 191, 38), 'yfinance.Ticker', 'yf.Ticker', ({(191, 31, 191, 37): 'ticker'}, {}), '(ticker)', True, 'import yfinance as yf\n'), ((270, 33, 270, 69), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((283, 15, 283, 26), 'plotly.graph_objects.Figure', 'go.Figure', ({}, {}), '()', True, 'import plotly.graph_objects as go\n'), ((318, 18, 318, 106), 'plotly.graph_objects.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objects as go\n'), ((40, 22, 40, 134), 'plotly.graph_objects.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objects as go\n'), ((91, 16, 91, 32), 'IPython.display.Markdown', 'Markdown', ({(91, 25, 91, 31): 'string'}, {}), '(string)', False, 'from IPython.display import Markdown\n'), ((179, 16, 179, 32), 'IPython.display.Markdown', 'Markdown', ({(179, 25, 179, 31): 'string'}, {}), '(string)', False, 'from IPython.display import Markdown\n'), ((208, 22, 208, 125), 'plotly.graph_objects.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objects as go\n'), ((285, 23, 285, 158), 'plotly.graph_objects.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objects as go\n'), ((310, 17, 310, 29), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date, timedelta\n'), ((212, 26, 212, 147), 'plotly.graph_objects.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objects as go\n'), ((309, 18, 309, 62), 'pandas.to_datetime', 'pd.to_datetime', ({(309, 33, 309, 61): "trade_history.loc[0, 'Date']"}, {}), "(trade_history.loc[0, 'Date'])", True, 'import pandas as pd\n'), ((309, 65, 309, 79), 'datetime.timedelta', 'timedelta', ({(309, 75, 309, 78): '150'}, {}), '(150)', False, 'from datetime import date, timedelta\n')] |
amsks/generic_flexbe_states | flexbe_navigation_states/src/flexbe_navigation_states/navigation_sm.py | f7be84105d3370c943ed17fc19af672b330726de | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from flexbe_states.wait_state import WaitState
from flexbe_navigation_states.turn_right_sm import turn_rightSM
from flexbe_states.subscriber_state import SubscriberState
from flexbe_utility_states.MARCO import Carbonara
from flexbe_navigation_states.turn_left_sm import turn_leftSM
from flexbe_navigation_states.go_straight_sm import go_straightSM
from flexbe_navigation_states.obstacle_avoidance_sm import Obstacle_AvoidanceSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat Jul 18 2020
@author: TG4
'''
class NavigationSM(Behavior):
'''
Integrated behaviour
'''
def __init__(self):
super(NavigationSM, self).__init__()
self.name = 'Navigation'
# parameters of this behavior
# references to used behaviors
self.add_behavior(turn_rightSM, 'turn_right')
self.add_behavior(turn_leftSM, 'turn_left')
self.add_behavior(go_straightSM, 'go_straight')
self.add_behavior(go_straightSM, 'go_straight_2')
self.add_behavior(go_straightSM, 'go_straight_3')
self.add_behavior(Obstacle_AvoidanceSM, 'Obstacle_Avoidance')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:1683 y:419, x:605 y:337
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:58 y:69
OperatableStateMachine.add('w1',
WaitState(wait_time=1),
transitions={'done': 's1'},
autonomy={'done': Autonomy.Off})
# x:1090 y:488
OperatableStateMachine.add('turn_right',
self.use_behavior(turn_rightSM, 'turn_right'),
transitions={'finished': 'w2', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:55 y:196
OperatableStateMachine.add('s1',
SubscriberState(topic='/darknet_ros/bounding_boxes', blocking=True, clear=False),
transitions={'received': 'carb1', 'unavailable': 'w1'},
autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
remapping={'message': 'detected'})
# x:286 y:212
OperatableStateMachine.add('carb1',
Carbonara(),
transitions={'none': 'go_straight', 'Obstacle': 'Obstacle_Avoidance', 'Left': 'go_straight_2', 'Right': 'go_straight_3'},
autonomy={'none': Autonomy.Off, 'Obstacle': Autonomy.Off, 'Left': Autonomy.Off, 'Right': Autonomy.Off},
remapping={'input_value': 'detected', 'Distance': 'Distance'})
# x:1180 y:246
OperatableStateMachine.add('w2',
WaitState(wait_time=1),
transitions={'done': 'w5'},
autonomy={'done': Autonomy.Off})
# x:1161 y:64
OperatableStateMachine.add('w5',
WaitState(wait_time=1),
transitions={'done': 'w1'},
autonomy={'done': Autonomy.Off})
# x:958 y:119
OperatableStateMachine.add('turn_left',
self.use_behavior(turn_leftSM, 'turn_left'),
transitions={'finished': 'w2', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:906 y:276
OperatableStateMachine.add('go_straight',
self.use_behavior(go_straightSM, 'go_straight'),
transitions={'finished': 'w2', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:679 y:118
OperatableStateMachine.add('go_straight_2',
self.use_behavior(go_straightSM, 'go_straight_2'),
transitions={'finished': 'turn_left', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:715 y:484
OperatableStateMachine.add('go_straight_3',
self.use_behavior(go_straightSM, 'go_straight_3'),
transitions={'finished': 'turn_right', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:381 y:495
OperatableStateMachine.add('Obstacle_Avoidance',
self.use_behavior(Obstacle_AvoidanceSM, 'Obstacle_Avoidance'),
transitions={'finished': 's1', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| [((59, 19, 59, 74), 'flexbe_core.OperatableStateMachine', 'OperatableStateMachine', (), '', False, 'from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger\n'), ((70, 10, 70, 32), 'flexbe_states.wait_state.WaitState', 'WaitState', (), '', False, 'from flexbe_states.wait_state import WaitState\n'), ((82, 10, 82, 90), 'flexbe_states.subscriber_state.SubscriberState', 'SubscriberState', (), '', False, 'from flexbe_states.subscriber_state import SubscriberState\n'), ((89, 10, 89, 21), 'flexbe_utility_states.MARCO.Carbonara', 'Carbonara', ({}, {}), '()', False, 'from flexbe_utility_states.MARCO import Carbonara\n'), ((96, 10, 96, 32), 'flexbe_states.wait_state.WaitState', 'WaitState', (), '', False, 'from flexbe_states.wait_state import WaitState\n'), ((102, 10, 102, 32), 'flexbe_states.wait_state.WaitState', 'WaitState', (), '', False, 'from flexbe_states.wait_state import WaitState\n')] |
dlehman83/text2cc | text2cc/xml_assessment.py | 303798993590bceaeb5238a6cce82893c37cdfc7 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021, Dana Lehman
# Copyright (c) 2020, Geoffrey M. Poore
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
from .quiz import Quiz, Question, GroupStart, GroupEnd, TextRegion
BEFORE_ITEMS = '''\
<?xml version="1.0" encoding="UTF-8"?>
<questestinterop xmlns="http://www.imsglobal.org/xsd/ims_qtiasiv1p2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.imsglobal.org/xsd/ims_qtiasiv1p2 http://www.imsglobal.org/profile/cc/ccv1p2/ccv1p2_qtiasiv1p2p1_v1p0.xsd">
<assessment ident="{assessment_identifier}" title="{title}">
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_maxattempts</fieldlabel>
<fieldentry>1</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
cc_profile
</fieldlabel>
<fieldentry>
cc.exam.v0p1
</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
qmd_assessmenttype
</fieldlabel>
<fieldentry>
Examination
</fieldentry>
</qtimetadatafield>
</qtimetadata>
<section ident="root_section">
'''
AFTER_ITEMS = '''\
</section>
</assessment>
</questestinterop>
'''
GROUP_START = '''\
<section ident="{ident}" title="{group_title}">
<selection_ordering>
<selection>
<selection_number>{pick}</selection_number>
<selection_extension>
<points_per_item>{points_per_item}</points_per_item>
</selection_extension>
</selection>
</selection_ordering>
'''
GROUP_END = '''\
</section>
'''
TEXT = '''\
<item ident="{ident}" title="{text_title_xml}">
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>text_only_question</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>0</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry></fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
<presentation>
<material>
<mattext texttype="text/html">{text_html_xml}</mattext>
</material>
</presentation>
</item>
'''
START_ITEM = '''\
<item ident="{question_identifier}" title="{question_title}">
'''
END_ITEM = '''\
</item>
'''
ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM = '''\
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>{question_type}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>{points_possible}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry>{original_answer_ids}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
'''
ITEM_METADATA_ESSAY = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM.replace('{original_answer_ids}', '')
ITEM_METADATA_UPLOAD = ITEM_METADATA_ESSAY
ITEM_PRESENTATION_MCTF = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_lid ident="response1" rcardinality="Single">
<render_choice>
{choices}
</render_choice>
</response_lid>
</presentation>
'''
ITEM_PRESENTATION_MCTF_CHOICE = '''\
<response_label ident="{ident}">
<material>
<mattext texttype="text/html">{choice_html_xml}</mattext>
</material>
</response_label>'''
ITEM_PRESENTATION_MULTANS = ITEM_PRESENTATION_MCTF.replace('Single', 'Multiple')
ITEM_PRESENTATION_MULTANS_CHOICE = ITEM_PRESENTATION_MCTF_CHOICE
ITEM_PRESENTATION_SHORTANS = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_ESSAY = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_UPLOAD = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
</presentation>
'''
ITEM_PRESENTATION_NUM = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib fibtype="Decimal">
<response_label ident="answer1"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_RESPROCESSING_START = '''\
<resprocessing>
<outcomes>
<decvar maxvalue="100" minvalue="0" varname="SCORE" vartype="Decimal"/>
</outcomes>
'''
ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_incorrect_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{answer_xml}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL = '''\
<varequal respident="response1">{answer_xml}</varequal>'''
ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK = ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT = '''\
<varequal respident="response1">{ident}</varequal>'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT = '''\
<not>
<varequal respident="response1">{ident}</varequal>
</not>'''
ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY = '''\
<respcondition continue="No">
<conditionvar>
<other/>
</conditionvar>
</respcondition>
'''
ITEM_RESPROCESSING_END = '''\
</resprocessing>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL = '''\
<itemfeedback ident="general_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT = '''\
<itemfeedback ident="correct_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT = '''\
<itemfeedback ident="general_incorrect_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL = '''\
<itemfeedback ident="{ident}_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
def assessment(*, quiz: Quiz, assessment_identifier: str, title_xml: str) -> str:
'''
Generate assessment XML from Quiz.
'''
xml = []
xml.append(BEFORE_ITEMS.format(assessment_identifier=assessment_identifier,
title=title_xml))
for question_or_delim in quiz.questions_and_delims:
if isinstance(question_or_delim, TextRegion):
xml.append(TEXT.format(ident=f'text2qti_text_{question_or_delim.id}',
text_title_xml=question_or_delim.title_xml,
assessment_question_identifierref=f'text2qti_question_ref_{question_or_delim.id}',
text_html_xml=question_or_delim.text_html_xml))
continue
if isinstance(question_or_delim, GroupStart):
xml.append(GROUP_START.format(ident=f'text2qti_group_{question_or_delim.group.id}',
group_title=question_or_delim.group.title_xml,
pick=question_or_delim.group.pick,
points_per_item=question_or_delim.group.points_per_question))
continue
if isinstance(question_or_delim, GroupEnd):
xml.append(GROUP_END)
continue
if not isinstance(question_or_delim, Question):
raise TypeError
question = question_or_delim
xml.append(START_ITEM.format(question_identifier=f'text2qti_question_{question.id}',
question_title=question.title_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = ','.join(f'text2qti_choice_{c.id}' for c in question.choices)
elif question.type == 'numerical_question':
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = f'text2qti_numerical_{question.id}'
elif question.type == 'essay_question':
item_metadata = ITEM_METADATA_ESSAY
original_answer_ids = f'text2qti_essay_{question.id}'
elif question.type == 'file_upload_question':
item_metadata = ITEM_METADATA_UPLOAD
original_answer_ids = f'text2qti_upload_{question.id}'
else:
raise ValueError
        # Type change for Schoology CC import
if question.type == 'multiple_choice_question':
typechange = 'cc.multiple_choice.v0p1'
elif question.type == 'true_false_question':
typechange = 'cc.true_false.v0p1'
elif question.type == 'short_answer_question':
typechange = 'cc.fib.v0p1'
elif question.type == 'multiple_answers_question':
typechange = 'cc.multiple_response.v0p1'
elif question.type == 'essay_question':
typechange = 'cc.essay.v0p1'
else:
typechange = question.type
xml.append(item_metadata.format(question_type=typechange,
points_possible=question.points_possible,
original_answer_ids=original_answer_ids,
assessment_question_identifierref=f'text2qti_question_ref_{question.id}'))
if question.type in ('true_false_question', 'multiple_choice_question', 'multiple_answers_question'):
if question.type in ('true_false_question', 'multiple_choice_question'):
item_presentation_choice = ITEM_PRESENTATION_MCTF_CHOICE
item_presentation = ITEM_PRESENTATION_MCTF
elif question.type == 'multiple_answers_question':
item_presentation_choice = ITEM_PRESENTATION_MULTANS_CHOICE
item_presentation = ITEM_PRESENTATION_MULTANS
else:
raise ValueError
choices = '\n'.join(item_presentation_choice.format(ident=f'text2qti_choice_{c.id}', choice_html_xml=c.choice_html_xml)
for c in question.choices)
xml.append(item_presentation.format(question_html_xml=question.question_html_xml, choices=choices))
elif question.type == 'short_answer_question':
xml.append(ITEM_PRESENTATION_SHORTANS.format(question_html_xml=question.question_html_xml))
elif question.type == 'numerical_question':
xml.append(ITEM_PRESENTATION_NUM.format(question_html_xml=question.question_html_xml))
elif question.type == 'essay_question':
xml.append(ITEM_PRESENTATION_ESSAY.format(question_html_xml=question.question_html_xml))
elif question.type == 'file_upload_question':
xml.append(ITEM_PRESENTATION_UPLOAD.format(question_html_xml=question.question_html_xml))
else:
raise ValueError
if question.type in ('true_false_question', 'multiple_choice_question'):
correct_choice = None
for choice in question.choices:
if choice.correct:
correct_choice = choice
break
if correct_choice is None:
raise TypeError
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
else:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'short_answer_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}', answer_xml=choice.choice_xml))
varequal = []
for choice in question.choices:
varequal.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL.format(answer_xml=choice.choice_xml))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'multiple_answers_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
varequal = []
for choice in question.choices:
if choice.correct:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT.format(ident=f'text2qti_choice_{choice.id}'))
else:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'numerical_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK)
if question.correct_feedback_raw is None:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK
else:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK
xml.append(item_resprocessing_num_set_correct.format(num_min=question.numerical_min_html_xml,
num_exact=question.numerical_exact_html_xml,
num_max=question.numerical_max_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'essay_question':
xml.append(ITEM_RESPROCESSING_START)
xml.append(ITEM_RESPROCESSING_ESSAY)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'file_upload_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
else:
raise ValueError
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question',
'numerical_question', 'essay_question', 'file_upload_question'):
if question.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL.format(feedback=question.feedback_html_xml))
if question.correct_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT.format(feedback=question.correct_feedback_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT.format(feedback=question.incorrect_feedback_html_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
for choice in question.choices:
if choice.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL.format(ident=f'text2qti_choice_{choice.id}',
feedback=choice.feedback_html_xml))
xml.append(END_ITEM)
xml.append(AFTER_ITEMS)
return ''.join(xml)
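# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# It only renders one of the module's .format()-based templates to show how the
# XML fragments above are expanded; the feedback string is a made-up example.
if __name__ == '__main__':
    print(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL.format(
        feedback='Remember to double-check the units in your answer.'))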
| [] |
bids-standard/bids-statsmodels-design-synthesizer | tests/test_aggregate_stats_design.py | d8a1dac3891760990082c2d3aa75a1edda44ffa0 | #!/usr/bin/env python
"""Tests for `bids_statsmodels_design_synthesizer` package."""
import pytest
import subprocess as sp
from pathlib import Path
SYNTHESIZER = "aggregate_stats_design.py"
from bids_statsmodels_design_synthesizer import aggregate_stats_design as synth_mod
# from bids_statsmodels_design_synthesizer import Path(SYNTHESIZER).stem as synth_mod
EXAMPLE_USER_ARGS = {
"OUTPUT_TSV": "aggregated_design.tsv",
"MODEL": "data/ds000003/models/model-001_smdl.json",
"EVENTS_TSV": "data/ds000003/sub-01/func/sub-01_task-rhymejudgment_events.tsv",
"DURATION": 320,
}
def test_cli_help():
with pytest.raises(sp.CalledProcessError):
output = sp.check_output([SYNTHESIZER, "-h"])
with pytest.raises(sp.CalledProcessError):
output = sp.check_output([SYNTHESIZER, "--non-existent"])
def test_design_aggregation_function():
synth_mod.main(EXAMPLE_USER_ARGS)
def test_minimal_cli_functionality():
"""
We roughly want to implement the equivalent of the following:
from bids.analysis import Analysis
from bids.layout import BIDSLayout
layout = BIDSLayout("data/ds000003")
analysis = Analysis(model="data/ds000003/models/model-001_smdl.json",layout=layout)
analysis.setup()
more specifically we want to reimplement this line
https://github.com/bids-standard/pybids/blob/b6cd0f6787230ce976a374fbd5fce650865752a3/bids/analysis/analysis.py#L282
"""
bids_dir = Path(__file__).parent / "data/ds000003"
model = "model-001_smdl.json"
arg_list = " " .join([f"""--{k.lower().replace("_","-")}={v}""" for k,v in EXAMPLE_USER_ARGS.items()])
cmd = f"{SYNTHESIZER} {arg_list}"
output = sp.check_output(cmd.split())
@pytest.mark.xfail(reason="Container not setup for boutiques yet")
def test_minimal_cli_functionality_using_boutiques():
"""This might be nice to do. boutiques sets /bin/sh as the entrypoint for the contain to /bin/sh so this should be tweaked to have the conda env and the pip installed package working correctly"""
boutiques_dir = Path(__file__).parent.parent / "boutiques"
cmd = f"""
bosh
exec
launch
{boutiques_dir}/bids-app-bids-statsmodels-design-synthesizer.json
{boutiques_dir}/invocation.json
"""
output = sp.check_output(cmd.split())
| [((52, 1, 52, 66), 'pytest.mark.xfail', 'pytest.mark.xfail', (), '', False, 'import pytest\n'), ((29, 4, 29, 37), 'bids_statsmodels_design_synthesizer.aggregate_stats_design.main', 'synth_mod.main', ({(29, 19, 29, 36): 'EXAMPLE_USER_ARGS'}, {}), '(EXAMPLE_USER_ARGS)', True, 'from bids_statsmodels_design_synthesizer import aggregate_stats_design as synth_mod\n'), ((22, 9, 22, 45), 'pytest.raises', 'pytest.raises', ({(22, 23, 22, 44): 'sp.CalledProcessError'}, {}), '(sp.CalledProcessError)', False, 'import pytest\n'), ((23, 17, 23, 53), 'subprocess.check_output', 'sp.check_output', ({(23, 33, 23, 52): "[SYNTHESIZER, '-h']"}, {}), "([SYNTHESIZER, '-h'])", True, 'import subprocess as sp\n'), ((24, 9, 24, 45), 'pytest.raises', 'pytest.raises', ({(24, 23, 24, 44): 'sp.CalledProcessError'}, {}), '(sp.CalledProcessError)', False, 'import pytest\n'), ((25, 17, 25, 65), 'subprocess.check_output', 'sp.check_output', ({(25, 33, 25, 64): "[SYNTHESIZER, '--non-existent']"}, {}), "([SYNTHESIZER, '--non-existent'])", True, 'import subprocess as sp\n'), ((45, 15, 45, 29), 'pathlib.Path', 'Path', ({(45, 20, 45, 28): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n'), ((55, 20, 55, 34), 'pathlib.Path', 'Path', ({(55, 25, 55, 33): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n')] |
skynetera/skynet | skynet-agent/plugins/plugin_api.py | 24a50f2a2eb95b777802934a2b66f162bf4b2d53 | #!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: [email protected]
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: plugin_api.py
@time: 2015-11-28 1:52 PM
"""
from linux import cpu, disk, iostats, loadavg, memory, netstats, swap
def get_load_info():
return loadavg.monitor()
def get_cpu_status():
return cpu.monitor()
def get_memory_info():
return memory.monitor()
def get_swap_info():
return swap.monitor()
def get_disk_info():
return disk.monitor()
def get_network_info():
return netstats.monitor()
def get_iostats_info():
return iostats.monitor()
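# Hedged usage sketch (added for illustration; not part of the original
# plugin): running the module directly prints one snapshot from each
# collector, assuming the bundled `linux` package works on the current host.
if __name__ == '__main__':
    import pprint
    for label, collector in [('load', get_load_info),
                             ('cpu', get_cpu_status),
                             ('memory', get_memory_info),
                             ('swap', get_swap_info),
                             ('disk', get_disk_info),
                             ('network', get_network_info),
                             ('iostats', get_iostats_info)]:
        print(label)
        pprint.pprint(collector())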
| [((19, 11, 19, 28), 'linux.loadavg.monitor', 'loadavg.monitor', ({}, {}), '()', False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((22, 11, 22, 24), 'linux.cpu.monitor', 'cpu.monitor', ({}, {}), '()', False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((25, 11, 25, 27), 'linux.memory.monitor', 'memory.monitor', ({}, {}), '()', False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((28, 11, 28, 25), 'linux.swap.monitor', 'swap.monitor', ({}, {}), '()', False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((31, 11, 31, 25), 'linux.disk.monitor', 'disk.monitor', ({}, {}), '()', False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((34, 11, 34, 29), 'linux.netstats.monitor', 'netstats.monitor', ({}, {}), '()', False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((37, 11, 37, 28), 'linux.iostats.monitor', 'iostats.monitor', ({}, {}), '()', False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n')] |
jwcarr/drift | code/figure_warp.py | a514c5970ba53025cc142257e953c1bda3cd049c | import numpy as np
import eyekit
import algorithms
import core
data = eyekit.io.load(core.FIXATIONS / 'sample.json')
passages = eyekit.io.load(core.DATA / 'passages.json')
original_sequence = data['trial_5']['fixations']
fixation_XY = np.array([fixation.xy for fixation in original_sequence], dtype=int)
word_XY = np.array([word.center for word in passages['1B'].words(alphabetical_only=False)], dtype=int)
start_times = np.array([i*100 for i in range(len(word_XY))], dtype=int)
expected_sequence = eyekit.FixationSequence(np.column_stack([word_XY, start_times, start_times+100]))
diagram = eyekit.vis.Image(1920, 1080)
diagram.draw_text_block(passages['1B'], mask_text=True)
diagram.draw_fixation_sequence(expected_sequence, color='#E32823', fixation_radius=6)
diagram.draw_fixation_sequence(original_sequence, color='#205E84', fixation_radius=6)
_, warping_path = algorithms.dynamic_time_warping(fixation_XY, word_XY)
for fixation, mapped_words in zip(original_sequence, warping_path):
for word_i in mapped_words:
word_x, word_y = word_XY[word_i]
diagram.draw_line(fixation.xy, (word_x, word_y), color='black', stroke_width=0.5, dashed=True)
fig = eyekit.vis.Figure()
fig.add_image(diagram)
fig.set_crop_margin(2)
fig.set_padding(vertical=2, horizontal=3, edge=1)
fig.set_enumeration(False)
fig.save(core.VISUALS / 'illustration_warp.pdf', width=83)
# fig.save(core.FIGS / 'fig02_single_column.eps', width=83)
| [((6, 7, 6, 53), 'eyekit.io.load', 'eyekit.io.load', ({(6, 22, 6, 52): "core.FIXATIONS / 'sample.json'"}, {}), "(core.FIXATIONS / 'sample.json')", False, 'import eyekit\n'), ((7, 11, 7, 54), 'eyekit.io.load', 'eyekit.io.load', ({(7, 26, 7, 53): "core.DATA / 'passages.json'"}, {}), "(core.DATA / 'passages.json')", False, 'import eyekit\n'), ((11, 14, 11, 82), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((17, 10, 17, 38), 'eyekit.vis.Image', 'eyekit.vis.Image', ({(17, 27, 17, 31): '1920', (17, 33, 17, 37): '1080'}, {}), '(1920, 1080)', False, 'import eyekit\n'), ((22, 18, 22, 71), 'algorithms.dynamic_time_warping', 'algorithms.dynamic_time_warping', ({(22, 50, 22, 61): 'fixation_XY', (22, 63, 22, 70): 'word_XY'}, {}), '(fixation_XY, word_XY)', False, 'import algorithms\n'), ((29, 6, 29, 25), 'eyekit.vis.Figure', 'eyekit.vis.Figure', ({}, {}), '()', False, 'import eyekit\n'), ((15, 44, 15, 100), 'numpy.column_stack', 'np.column_stack', ({(15, 60, 15, 99): '[word_XY, start_times, start_times + 100]'}, {}), '([word_XY, start_times, start_times + 100])', True, 'import numpy as np\n')] |
krux/python-storm | storm/Nimbus.py | 1a9c06d3580a2b1bc2c27174d892a6dbcaa9e0bd | #
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
"""
pass
def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
pass
def killTopology(self, name):
"""
Parameters:
- name
"""
pass
def killTopologyWithOpts(self, name, options):
"""
Parameters:
- name
- options
"""
pass
def activate(self, name):
"""
Parameters:
- name
"""
pass
def deactivate(self, name):
"""
Parameters:
- name
"""
pass
def rebalance(self, name, options):
"""
Parameters:
- name
- options
"""
pass
def beginFileUpload(self, ):
pass
def uploadChunk(self, location, chunk):
"""
Parameters:
- location
- chunk
"""
pass
def finishFileUpload(self, location):
"""
Parameters:
- location
"""
pass
def beginFileDownload(self, file):
"""
Parameters:
- file
"""
pass
def downloadChunk(self, id):
"""
Parameters:
- id
"""
pass
def getNimbusConf(self, ):
pass
def getClusterInfo(self, ):
pass
def getTopologyInfo(self, id):
"""
Parameters:
- id
"""
pass
def getTopologyConf(self, id):
"""
Parameters:
- id
"""
pass
def getTopology(self, id):
"""
Parameters:
- id
"""
pass
def getUserTopology(self, id):
"""
Parameters:
- id
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
"""
self.send_submitTopology(name, uploadedJarLocation, jsonConf, topology)
self.recv_submitTopology()
def send_submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
self._oprot.writeMessageBegin('submitTopology', TMessageType.CALL, self._seqid)
args = submitTopology_args()
args.name = name
args.uploadedJarLocation = uploadedJarLocation
args.jsonConf = jsonConf
args.topology = topology
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = submitTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
self.send_submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, options)
self.recv_submitTopologyWithOpts()
def send_submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
self._oprot.writeMessageBegin('submitTopologyWithOpts', TMessageType.CALL, self._seqid)
args = submitTopologyWithOpts_args()
args.name = name
args.uploadedJarLocation = uploadedJarLocation
args.jsonConf = jsonConf
args.topology = topology
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitTopologyWithOpts(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = submitTopologyWithOpts_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def killTopology(self, name):
"""
Parameters:
- name
"""
self.send_killTopology(name)
self.recv_killTopology()
def send_killTopology(self, name):
self._oprot.writeMessageBegin('killTopology', TMessageType.CALL, self._seqid)
args = killTopology_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = killTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def killTopologyWithOpts(self, name, options):
"""
Parameters:
- name
- options
"""
self.send_killTopologyWithOpts(name, options)
self.recv_killTopologyWithOpts()
def send_killTopologyWithOpts(self, name, options):
self._oprot.writeMessageBegin('killTopologyWithOpts', TMessageType.CALL, self._seqid)
args = killTopologyWithOpts_args()
args.name = name
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopologyWithOpts(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = killTopologyWithOpts_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def activate(self, name):
"""
Parameters:
- name
"""
self.send_activate(name)
self.recv_activate()
def send_activate(self, name):
self._oprot.writeMessageBegin('activate', TMessageType.CALL, self._seqid)
args = activate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_activate(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = activate_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def deactivate(self, name):
"""
Parameters:
- name
"""
self.send_deactivate(name)
self.recv_deactivate()
def send_deactivate(self, name):
self._oprot.writeMessageBegin('deactivate', TMessageType.CALL, self._seqid)
args = deactivate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deactivate(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deactivate_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def rebalance(self, name, options):
"""
Parameters:
- name
- options
"""
self.send_rebalance(name, options)
self.recv_rebalance()
def send_rebalance(self, name, options):
self._oprot.writeMessageBegin('rebalance', TMessageType.CALL, self._seqid)
args = rebalance_args()
args.name = name
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rebalance(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = rebalance_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def beginFileUpload(self, ):
self.send_beginFileUpload()
return self.recv_beginFileUpload()
def send_beginFileUpload(self, ):
self._oprot.writeMessageBegin('beginFileUpload', TMessageType.CALL, self._seqid)
args = beginFileUpload_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginFileUpload(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = beginFileUpload_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginFileUpload failed: unknown result");
def uploadChunk(self, location, chunk):
"""
Parameters:
- location
- chunk
"""
self.send_uploadChunk(location, chunk)
self.recv_uploadChunk()
def send_uploadChunk(self, location, chunk):
self._oprot.writeMessageBegin('uploadChunk', TMessageType.CALL, self._seqid)
args = uploadChunk_args()
args.location = location
args.chunk = chunk
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_uploadChunk(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = uploadChunk_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def finishFileUpload(self, location):
"""
Parameters:
- location
"""
self.send_finishFileUpload(location)
self.recv_finishFileUpload()
def send_finishFileUpload(self, location):
self._oprot.writeMessageBegin('finishFileUpload', TMessageType.CALL, self._seqid)
args = finishFileUpload_args()
args.location = location
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_finishFileUpload(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = finishFileUpload_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def beginFileDownload(self, file):
"""
Parameters:
- file
"""
self.send_beginFileDownload(file)
return self.recv_beginFileDownload()
def send_beginFileDownload(self, file):
self._oprot.writeMessageBegin('beginFileDownload', TMessageType.CALL, self._seqid)
args = beginFileDownload_args()
args.file = file
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginFileDownload(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = beginFileDownload_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginFileDownload failed: unknown result");
def downloadChunk(self, id):
"""
Parameters:
- id
"""
self.send_downloadChunk(id)
return self.recv_downloadChunk()
def send_downloadChunk(self, id):
self._oprot.writeMessageBegin('downloadChunk', TMessageType.CALL, self._seqid)
args = downloadChunk_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_downloadChunk(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = downloadChunk_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "downloadChunk failed: unknown result");
def getNimbusConf(self, ):
self.send_getNimbusConf()
return self.recv_getNimbusConf()
def send_getNimbusConf(self, ):
self._oprot.writeMessageBegin('getNimbusConf', TMessageType.CALL, self._seqid)
args = getNimbusConf_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getNimbusConf(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getNimbusConf_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getNimbusConf failed: unknown result");
def getClusterInfo(self, ):
self.send_getClusterInfo()
return self.recv_getClusterInfo()
def send_getClusterInfo(self, ):
self._oprot.writeMessageBegin('getClusterInfo', TMessageType.CALL, self._seqid)
args = getClusterInfo_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getClusterInfo(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getClusterInfo_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getClusterInfo failed: unknown result");
def getTopologyInfo(self, id):
"""
Parameters:
- id
"""
self.send_getTopologyInfo(id)
return self.recv_getTopologyInfo()
def send_getTopologyInfo(self, id):
self._oprot.writeMessageBegin('getTopologyInfo', TMessageType.CALL, self._seqid)
args = getTopologyInfo_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTopologyInfo(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTopologyInfo_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyInfo failed: unknown result");
def getTopologyConf(self, id):
"""
Parameters:
- id
"""
self.send_getTopologyConf(id)
return self.recv_getTopologyConf()
def send_getTopologyConf(self, id):
self._oprot.writeMessageBegin('getTopologyConf', TMessageType.CALL, self._seqid)
args = getTopologyConf_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTopologyConf(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTopologyConf_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyConf failed: unknown result");
def getTopology(self, id):
"""
Parameters:
- id
"""
self.send_getTopology(id)
return self.recv_getTopology()
def send_getTopology(self, id):
self._oprot.writeMessageBegin('getTopology', TMessageType.CALL, self._seqid)
args = getTopology_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopology failed: unknown result");
def getUserTopology(self, id):
"""
Parameters:
- id
"""
self.send_getUserTopology(id)
return self.recv_getUserTopology()
def send_getUserTopology(self, id):
self._oprot.writeMessageBegin('getUserTopology', TMessageType.CALL, self._seqid)
args = getUserTopology_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getUserTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getUserTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getUserTopology failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["submitTopology"] = Processor.process_submitTopology
self._processMap["submitTopologyWithOpts"] = Processor.process_submitTopologyWithOpts
self._processMap["killTopology"] = Processor.process_killTopology
self._processMap["killTopologyWithOpts"] = Processor.process_killTopologyWithOpts
self._processMap["activate"] = Processor.process_activate
self._processMap["deactivate"] = Processor.process_deactivate
self._processMap["rebalance"] = Processor.process_rebalance
self._processMap["beginFileUpload"] = Processor.process_beginFileUpload
self._processMap["uploadChunk"] = Processor.process_uploadChunk
self._processMap["finishFileUpload"] = Processor.process_finishFileUpload
self._processMap["beginFileDownload"] = Processor.process_beginFileDownload
self._processMap["downloadChunk"] = Processor.process_downloadChunk
self._processMap["getNimbusConf"] = Processor.process_getNimbusConf
self._processMap["getClusterInfo"] = Processor.process_getClusterInfo
self._processMap["getTopologyInfo"] = Processor.process_getTopologyInfo
self._processMap["getTopologyConf"] = Processor.process_getTopologyConf
self._processMap["getTopology"] = Processor.process_getTopology
self._processMap["getUserTopology"] = Processor.process_getUserTopology
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_submitTopology(self, seqid, iprot, oprot):
args = submitTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = submitTopology_result()
try:
self._handler.submitTopology(args.name, args.uploadedJarLocation, args.jsonConf, args.topology)
except AlreadyAliveException as e:
result.e = e
except InvalidTopologyException as ite:
result.ite = ite
oprot.writeMessageBegin("submitTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_submitTopologyWithOpts(self, seqid, iprot, oprot):
args = submitTopologyWithOpts_args()
args.read(iprot)
iprot.readMessageEnd()
result = submitTopologyWithOpts_result()
try:
self._handler.submitTopologyWithOpts(args.name, args.uploadedJarLocation, args.jsonConf, args.topology, args.options)
except AlreadyAliveException as e:
result.e = e
except InvalidTopologyException as ite:
result.ite = ite
oprot.writeMessageBegin("submitTopologyWithOpts", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_killTopology(self, seqid, iprot, oprot):
args = killTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = killTopology_result()
try:
self._handler.killTopology(args.name)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("killTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_killTopologyWithOpts(self, seqid, iprot, oprot):
args = killTopologyWithOpts_args()
args.read(iprot)
iprot.readMessageEnd()
result = killTopologyWithOpts_result()
try:
self._handler.killTopologyWithOpts(args.name, args.options)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("killTopologyWithOpts", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_activate(self, seqid, iprot, oprot):
args = activate_args()
args.read(iprot)
iprot.readMessageEnd()
result = activate_result()
try:
self._handler.activate(args.name)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("activate", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deactivate(self, seqid, iprot, oprot):
args = deactivate_args()
args.read(iprot)
iprot.readMessageEnd()
result = deactivate_result()
try:
self._handler.deactivate(args.name)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("deactivate", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rebalance(self, seqid, iprot, oprot):
args = rebalance_args()
args.read(iprot)
iprot.readMessageEnd()
result = rebalance_result()
try:
self._handler.rebalance(args.name, args.options)
except NotAliveException as e:
result.e = e
except InvalidTopologyException as ite:
result.ite = ite
oprot.writeMessageBegin("rebalance", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_beginFileUpload(self, seqid, iprot, oprot):
args = beginFileUpload_args()
args.read(iprot)
iprot.readMessageEnd()
result = beginFileUpload_result()
result.success = self._handler.beginFileUpload()
oprot.writeMessageBegin("beginFileUpload", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_uploadChunk(self, seqid, iprot, oprot):
args = uploadChunk_args()
args.read(iprot)
iprot.readMessageEnd()
result = uploadChunk_result()
self._handler.uploadChunk(args.location, args.chunk)
oprot.writeMessageBegin("uploadChunk", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_finishFileUpload(self, seqid, iprot, oprot):
args = finishFileUpload_args()
args.read(iprot)
iprot.readMessageEnd()
result = finishFileUpload_result()
self._handler.finishFileUpload(args.location)
oprot.writeMessageBegin("finishFileUpload", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_beginFileDownload(self, seqid, iprot, oprot):
args = beginFileDownload_args()
args.read(iprot)
iprot.readMessageEnd()
result = beginFileDownload_result()
result.success = self._handler.beginFileDownload(args.file)
oprot.writeMessageBegin("beginFileDownload", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_downloadChunk(self, seqid, iprot, oprot):
args = downloadChunk_args()
args.read(iprot)
iprot.readMessageEnd()
result = downloadChunk_result()
result.success = self._handler.downloadChunk(args.id)
oprot.writeMessageBegin("downloadChunk", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getNimbusConf(self, seqid, iprot, oprot):
args = getNimbusConf_args()
args.read(iprot)
iprot.readMessageEnd()
result = getNimbusConf_result()
result.success = self._handler.getNimbusConf()
oprot.writeMessageBegin("getNimbusConf", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getClusterInfo(self, seqid, iprot, oprot):
args = getClusterInfo_args()
args.read(iprot)
iprot.readMessageEnd()
result = getClusterInfo_result()
result.success = self._handler.getClusterInfo()
oprot.writeMessageBegin("getClusterInfo", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTopologyInfo(self, seqid, iprot, oprot):
args = getTopologyInfo_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTopologyInfo_result()
try:
result.success = self._handler.getTopologyInfo(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getTopologyInfo", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTopologyConf(self, seqid, iprot, oprot):
args = getTopologyConf_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTopologyConf_result()
try:
result.success = self._handler.getTopologyConf(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getTopologyConf", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTopology(self, seqid, iprot, oprot):
args = getTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTopology_result()
try:
result.success = self._handler.getTopology(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getUserTopology(self, seqid, iprot, oprot):
args = getUserTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = getUserTopology_result()
try:
result.success = self._handler.getUserTopology(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getUserTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class submitTopology_args:
"""
Attributes:
- name
- uploadedJarLocation
- jsonConf
- topology
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'uploadedJarLocation', None, None, ), # 2
(3, TType.STRING, 'jsonConf', None, None, ), # 3
(4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ), # 4
)
def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None,):
self.name = name
self.uploadedJarLocation = uploadedJarLocation
self.jsonConf = jsonConf
self.topology = topology
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uploadedJarLocation = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.jsonConf = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.topology = StormTopology()
self.topology.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopology_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.uploadedJarLocation is not None:
oprot.writeFieldBegin('uploadedJarLocation', TType.STRING, 2)
oprot.writeString(self.uploadedJarLocation)
oprot.writeFieldEnd()
if self.jsonConf is not None:
oprot.writeFieldBegin('jsonConf', TType.STRING, 3)
oprot.writeString(self.jsonConf)
oprot.writeFieldEnd()
if self.topology is not None:
oprot.writeFieldBegin('topology', TType.STRUCT, 4)
self.topology.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class submitTopology_result:
"""
Attributes:
- e
- ite
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (AlreadyAliveException, AlreadyAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
)
def __init__(self, e=None, ite=None,):
self.e = e
self.ite = ite
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = AlreadyAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ite = InvalidTopologyException()
self.ite.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopology_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.ite is not None:
oprot.writeFieldBegin('ite', TType.STRUCT, 2)
self.ite.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class submitTopologyWithOpts_args:
"""
Attributes:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'uploadedJarLocation', None, None, ), # 2
(3, TType.STRING, 'jsonConf', None, None, ), # 3
(4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'options', (SubmitOptions, SubmitOptions.thrift_spec), None, ), # 5
)
def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None, options=None,):
self.name = name
self.uploadedJarLocation = uploadedJarLocation
self.jsonConf = jsonConf
self.topology = topology
self.options = options
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uploadedJarLocation = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.jsonConf = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.topology = StormTopology()
self.topology.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.options = SubmitOptions()
self.options.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopologyWithOpts_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.uploadedJarLocation is not None:
oprot.writeFieldBegin('uploadedJarLocation', TType.STRING, 2)
oprot.writeString(self.uploadedJarLocation)
oprot.writeFieldEnd()
if self.jsonConf is not None:
oprot.writeFieldBegin('jsonConf', TType.STRING, 3)
oprot.writeString(self.jsonConf)
oprot.writeFieldEnd()
if self.topology is not None:
oprot.writeFieldBegin('topology', TType.STRUCT, 4)
self.topology.write(oprot)
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.STRUCT, 5)
self.options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class submitTopologyWithOpts_result:
"""
Attributes:
- e
- ite
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (AlreadyAliveException, AlreadyAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
)
def __init__(self, e=None, ite=None,):
self.e = e
self.ite = ite
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = AlreadyAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ite = InvalidTopologyException()
self.ite.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopologyWithOpts_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.ite is not None:
oprot.writeFieldBegin('ite', TType.STRUCT, 2)
self.ite.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopology_args:
"""
Attributes:
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
)
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopology_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopology_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopology_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopologyWithOpts_args:
"""
Attributes:
- name
- options
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRUCT, 'options', (KillOptions, KillOptions.thrift_spec), None, ), # 2
)
def __init__(self, name=None, options=None,):
self.name = name
self.options = options
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.options = KillOptions()
self.options.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopologyWithOpts_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.STRUCT, 2)
self.options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopologyWithOpts_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopologyWithOpts_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class activate_args:
"""
Attributes:
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
)
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('activate_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class activate_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('activate_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deactivate_args:
"""
Attributes:
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
)
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deactivate_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deactivate_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deactivate_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rebalance_args:
"""
Attributes:
- name
- options
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRUCT, 'options', (RebalanceOptions, RebalanceOptions.thrift_spec), None, ), # 2
)
def __init__(self, name=None, options=None,):
self.name = name
self.options = options
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.options = RebalanceOptions()
self.options.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rebalance_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.STRUCT, 2)
self.options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rebalance_result:
"""
Attributes:
- e
- ite
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
)
def __init__(self, e=None, ite=None,):
self.e = e
self.ite = ite
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ite = InvalidTopologyException()
self.ite.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rebalance_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.ite is not None:
oprot.writeFieldBegin('ite', TType.STRUCT, 2)
self.ite.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileUpload_args:
thrift_spec = (
)
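  # beginFileUpload takes no parameters, so its argument struct has an empty
  # thrift_spec and read()/write() only emit the struct envelope.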
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileUpload_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileUpload_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
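  # The RPC return value travels in field id 0, conventionally named 'success'.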
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileUpload_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class uploadChunk_args:
"""
Attributes:
- location
- chunk
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'location', None, None, ), # 1
(2, TType.STRING, 'chunk', None, None, ), # 2
)
def __init__(self, location=None, chunk=None,):
self.location = location
self.chunk = chunk
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.location = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.chunk = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('uploadChunk_args')
if self.location is not None:
oprot.writeFieldBegin('location', TType.STRING, 1)
oprot.writeString(self.location)
oprot.writeFieldEnd()
if self.chunk is not None:
oprot.writeFieldBegin('chunk', TType.STRING, 2)
oprot.writeString(self.chunk)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class uploadChunk_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('uploadChunk_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class finishFileUpload_args:
"""
Attributes:
- location
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'location', None, None, ), # 1
)
def __init__(self, location=None,):
self.location = location
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.location = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('finishFileUpload_args')
if self.location is not None:
oprot.writeFieldBegin('location', TType.STRING, 1)
oprot.writeString(self.location)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class finishFileUpload_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('finishFileUpload_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileDownload_args:
"""
Attributes:
- file
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'file', None, None, ), # 1
)
def __init__(self, file=None,):
self.file = file
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.file = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileDownload_args')
if self.file is not None:
oprot.writeFieldBegin('file', TType.STRING, 1)
oprot.writeString(self.file)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileDownload_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileDownload_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class downloadChunk_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('downloadChunk_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class downloadChunk_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('downloadChunk_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getNimbusConf_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getNimbusConf_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getNimbusConf_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getNimbusConf_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getClusterInfo_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getClusterInfo_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getClusterInfo_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ClusterSummary, ClusterSummary.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ClusterSummary()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getClusterInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyInfo_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfo_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyInfo_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TopologyInfo, TopologyInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TopologyInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyConf_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyConf_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyConf_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyConf_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopology_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopology_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopology_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = StormTopology()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopology_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getUserTopology_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getUserTopology_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getUserTopology_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = StormTopology()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getUserTopology_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| [((410, 10, 410, 111), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(410, 32, 410, 68): 'TApplicationException.MISSING_RESULT', (410, 70, 410, 110): '"""beginFileUpload failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'beginFileUpload failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((498, 10, 498, 113), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(498, 32, 498, 68): 'TApplicationException.MISSING_RESULT', (498, 70, 498, 112): '"""beginFileDownload failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'beginFileDownload failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((528, 10, 528, 109), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(528, 32, 528, 68): 'TApplicationException.MISSING_RESULT', (528, 70, 528, 108): '"""downloadChunk failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'downloadChunk failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((553, 10, 553, 109), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(553, 32, 553, 68): 'TApplicationException.MISSING_RESULT', (553, 70, 553, 108): '"""getNimbusConf failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'getNimbusConf failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((578, 10, 578, 110), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(578, 32, 578, 68): 'TApplicationException.MISSING_RESULT', (578, 70, 578, 109): '"""getClusterInfo failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'getClusterInfo failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((610, 10, 610, 111), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(610, 32, 610, 68): 'TApplicationException.MISSING_RESULT', (610, 70, 610, 110): '"""getTopologyInfo failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'getTopologyInfo failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((642, 10, 642, 111), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(642, 32, 642, 68): 'TApplicationException.MISSING_RESULT', (642, 70, 642, 110): '"""getTopologyConf failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'getTopologyConf failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((674, 10, 674, 107), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(674, 32, 674, 68): 'TApplicationException.MISSING_RESULT', (674, 70, 674, 106): '"""getTopology failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'getTopology failed: unknown result')", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((706, 10, 706, 111), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(706, 32, 706, 68): 'TApplicationException.MISSING_RESULT', (706, 70, 706, 110): '"""getUserTopology failed: unknown result"""'}, {}), "(TApplicationException.MISSING_RESULT,\n 'getUserTopology failed: unknown result')", False, 'from thrift.Thrift 
import TType, TMessageType, TException, TApplicationException\n'), ((178, 10, 178, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((218, 10, 218, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((250, 10, 250, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((282, 10, 282, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((312, 10, 312, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((342, 10, 342, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((374, 10, 374, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((401, 10, 401, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((433, 10, 433, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((461, 10, 461, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((489, 10, 489, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((519, 10, 519, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((544, 10, 544, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((569, 10, 569, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((599, 10, 599, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((631, 10, 631, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((663, 10, 663, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((695, 10, 695, 33), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({}, {}), '()', False, 'from thrift.Thrift import TType, TMessageType, TException, 
TApplicationException\n'), ((737, 10, 737, 101), 'thrift.Thrift.TApplicationException', 'TApplicationException', ({(737, 32, 737, 68): 'TApplicationException.UNKNOWN_METHOD', (737, 70, 737, 100): "'Unknown function %s' % name"}, {}), "(TApplicationException.UNKNOWN_METHOD, \n 'Unknown function %s' % name)", False, 'from thrift.Thrift import TType, TMessageType, TException, TApplicationException\n'), ((1012, 6, 1012, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1012, 31, 1012, 35): 'self', (1012, 37, 1012, 48): 'iprot.trans', (1012, 50, 1012, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1103, 6, 1103, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1103, 31, 1103, 35): 'self', (1103, 37, 1103, 48): 'iprot.trans', (1103, 50, 1103, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1186, 6, 1186, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1186, 31, 1186, 35): 'self', (1186, 37, 1186, 48): 'iprot.trans', (1186, 50, 1186, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1287, 6, 1287, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1287, 31, 1287, 35): 'self', (1287, 37, 1287, 48): 'iprot.trans', (1287, 50, 1287, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1358, 6, 1358, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1358, 31, 1358, 35): 'self', (1358, 37, 1358, 48): 'iprot.trans', (1358, 50, 1358, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1418, 6, 1418, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1418, 31, 1418, 35): 'self', (1418, 37, 1418, 48): 'iprot.trans', (1418, 50, 1418, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1482, 6, 1482, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1482, 31, 1482, 35): 'self', (1482, 37, 1482, 48): 'iprot.trans', (1482, 50, 1482, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1552, 6, 1552, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1552, 31, 1552, 35): 'self', (1552, 37, 1552, 48): 'iprot.trans', (1552, 50, 1552, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1613, 6, 1613, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1613, 31, 1613, 35): 'self', (1613, 37, 1613, 48): 'iprot.trans', (1613, 50, 1613, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1673, 6, 1673, 85), 'thrift.protocol.fastbinary.decode_binary', 
'fastbinary.decode_binary', ({(1673, 31, 1673, 35): 'self', (1673, 37, 1673, 48): 'iprot.trans', (1673, 50, 1673, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1734, 6, 1734, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1734, 31, 1734, 35): 'self', (1734, 37, 1734, 48): 'iprot.trans', (1734, 50, 1734, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1794, 6, 1794, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1794, 31, 1794, 35): 'self', (1794, 37, 1794, 48): 'iprot.trans', (1794, 50, 1794, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1858, 6, 1858, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1858, 31, 1858, 35): 'self', (1858, 37, 1858, 48): 'iprot.trans', (1858, 50, 1858, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1931, 6, 1931, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1931, 31, 1931, 35): 'self', (1931, 37, 1931, 48): 'iprot.trans', (1931, 50, 1931, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1993, 6, 1993, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(1993, 31, 1993, 35): 'self', (1993, 37, 1993, 48): 'iprot.trans', (1993, 50, 1993, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2043, 6, 2043, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2043, 31, 2043, 35): 'self', (2043, 37, 2043, 48): 'iprot.trans', (2043, 50, 2043, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2106, 6, 2106, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2106, 31, 2106, 35): 'self', (2106, 37, 2106, 48): 'iprot.trans', (2106, 50, 2106, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2166, 6, 2166, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2166, 31, 2166, 35): 'self', (2166, 37, 2166, 48): 'iprot.trans', (2166, 50, 2166, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2217, 6, 2217, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2217, 31, 2217, 35): 'self', (2217, 37, 2217, 48): 'iprot.trans', (2217, 50, 2217, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2268, 6, 2268, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2268, 31, 2268, 35): 'self', (2268, 37, 2268, 48): 'iprot.trans', (2268, 50, 2268, 84): '(self.__class__, 
self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2319, 6, 2319, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2319, 31, 2319, 35): 'self', (2319, 37, 2319, 48): 'iprot.trans', (2319, 50, 2319, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2378, 6, 2378, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2378, 31, 2378, 35): 'self', (2378, 37, 2378, 48): 'iprot.trans', (2378, 50, 2378, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2438, 6, 2438, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2438, 31, 2438, 35): 'self', (2438, 37, 2438, 48): 'iprot.trans', (2438, 50, 2438, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2497, 6, 2497, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2497, 31, 2497, 35): 'self', (2497, 37, 2497, 48): 'iprot.trans', (2497, 50, 2497, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2548, 6, 2548, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2548, 31, 2548, 35): 'self', (2548, 37, 2548, 48): 'iprot.trans', (2548, 50, 2548, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2598, 6, 2598, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2598, 31, 2598, 35): 'self', (2598, 37, 2598, 48): 'iprot.trans', (2598, 50, 2598, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2649, 6, 2649, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2649, 31, 2649, 35): 'self', (2649, 37, 2649, 48): 'iprot.trans', (2649, 50, 2649, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2699, 6, 2699, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2699, 31, 2699, 35): 'self', (2699, 37, 2699, 48): 'iprot.trans', (2699, 50, 2699, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2760, 6, 2760, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2760, 31, 2760, 35): 'self', (2760, 37, 2760, 48): 'iprot.trans', (2760, 50, 2760, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2822, 6, 2822, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2822, 31, 2822, 35): 'self', (2822, 37, 2822, 48): 'iprot.trans', (2822, 50, 2822, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2893, 6, 2893, 
85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2893, 31, 2893, 35): 'self', (2893, 37, 2893, 48): 'iprot.trans', (2893, 50, 2893, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2955, 6, 2955, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(2955, 31, 2955, 35): 'self', (2955, 37, 2955, 48): 'iprot.trans', (2955, 50, 2955, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3025, 6, 3025, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(3025, 31, 3025, 35): 'self', (3025, 37, 3025, 48): 'iprot.trans', (3025, 50, 3025, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3087, 6, 3087, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(3087, 31, 3087, 35): 'self', (3087, 37, 3087, 48): 'iprot.trans', (3087, 50, 3087, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3158, 6, 3158, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(3158, 31, 3158, 35): 'self', (3158, 37, 3158, 48): 'iprot.trans', (3158, 50, 3158, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3220, 6, 3220, 85), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', ({(3220, 31, 3220, 35): 'self', (3220, 37, 3220, 48): 'iprot.trans', (3220, 50, 3220, 84): '(self.__class__, self.thrift_spec)'}, {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1047, 24, 1047, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1047, 49, 1047, 53): 'self', (1047, 55, 1047, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1129, 24, 1129, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1129, 49, 1129, 53): 'self', (1129, 55, 1129, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1227, 24, 1227, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1227, 49, 1227, 53): 'self', (1227, 55, 1227, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1313, 24, 1313, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1313, 49, 1313, 53): 'self', (1313, 55, 1313, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1377, 24, 1377, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1377, 49, 1377, 53): 'self', (1377, 55, 1377, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1438, 24, 1438, 90), 'thrift.protocol.fastbinary.encode_binary', 
'fastbinary.encode_binary', ({(1438, 49, 1438, 53): 'self', (1438, 55, 1438, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1507, 24, 1507, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1507, 49, 1507, 53): 'self', (1507, 55, 1507, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1572, 24, 1572, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1572, 49, 1572, 53): 'self', (1572, 55, 1572, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1632, 24, 1632, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1632, 49, 1632, 53): 'self', (1632, 55, 1632, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1693, 24, 1693, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1693, 49, 1693, 53): 'self', (1693, 55, 1693, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1753, 24, 1753, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1753, 49, 1753, 53): 'self', (1753, 55, 1753, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1814, 24, 1814, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1814, 49, 1814, 53): 'self', (1814, 55, 1814, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1883, 24, 1883, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1883, 49, 1883, 53): 'self', (1883, 55, 1883, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((1957, 24, 1957, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(1957, 49, 1957, 53): 'self', (1957, 55, 1957, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2007, 24, 2007, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2007, 49, 2007, 53): 'self', (2007, 55, 2007, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2062, 24, 2062, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2062, 49, 2062, 53): 'self', (2062, 55, 2062, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2130, 24, 2130, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2130, 49, 2130, 53): 'self', (2130, 55, 2130, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2180, 24, 2180, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2180, 49, 2180, 53): 'self', 
(2180, 55, 2180, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2236, 24, 2236, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2236, 49, 2236, 53): 'self', (2236, 55, 2236, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2282, 24, 2282, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2282, 49, 2282, 53): 'self', (2282, 55, 2282, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2338, 24, 2338, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2338, 49, 2338, 53): 'self', (2338, 55, 2338, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2397, 24, 2397, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2397, 49, 2397, 53): 'self', (2397, 55, 2397, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2457, 24, 2457, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2457, 49, 2457, 53): 'self', (2457, 55, 2457, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2516, 24, 2516, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2516, 49, 2516, 53): 'self', (2516, 55, 2516, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2562, 24, 2562, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2562, 49, 2562, 53): 'self', (2562, 55, 2562, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2617, 24, 2617, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2617, 49, 2617, 53): 'self', (2617, 55, 2617, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2663, 24, 2663, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2663, 49, 2663, 53): 'self', (2663, 55, 2663, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2719, 24, 2719, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2719, 49, 2719, 53): 'self', (2719, 55, 2719, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2779, 24, 2779, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2779, 49, 2779, 53): 'self', (2779, 55, 2779, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2848, 24, 2848, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2848, 49, 2848, 53): 'self', (2848, 55, 2848, 89): '(self.__class__, self.thrift_spec)'}, 
{}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2912, 24, 2912, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2912, 49, 2912, 53): 'self', (2912, 55, 2912, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((2980, 24, 2980, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(2980, 49, 2980, 53): 'self', (2980, 55, 2980, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3044, 24, 3044, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(3044, 49, 3044, 53): 'self', (3044, 55, 3044, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3113, 24, 3113, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(3113, 49, 3113, 53): 'self', (3113, 55, 3113, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3177, 24, 3177, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(3177, 49, 3177, 53): 'self', (3177, 55, 3177, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n'), ((3246, 24, 3246, 90), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', ({(3246, 49, 3246, 53): 'self', (3246, 55, 3246, 89): '(self.__class__, self.thrift_spec)'}, {}), '(self, (self.__class__, self.thrift_spec))', False, 'from thrift.protocol import fastbinary\n')] |
philipwfowler/genucator | gemucator/__init__.py | d43a79afe1aa81ca24d7ab4370ed230e08aa89bf | #! /usr/bin/env python
from .core import gemucator
| [] |
daniel-waruo/e-commerse-api | client/checkout/schema/types.py | 6b080039398fb4099a34335317d649dd67783f63 | import graphene
from graphene_django import DjangoObjectType
from graphene_django.converter import convert_django_field
from pyuploadcare.dj.models import ImageField
| [] |
PlantDr430/CSU_scripts | pangenome_fluidity.py | 8ed9e1dc014b099ce68d77ce5c8747217c230e61 | #!/usr/bin/python3
'''
This script follows formulas put forth in Kislyuk et al. (2011) to calculate genome
fluidity of a pangenome dataset. Variance and standard error are estimated as the total
variance, which combines the variance due to subsampling all possible combinations
(without replacement) of N genomes from the total pool of genomes and the variance
due to the limited number of sampled genomes (variance of the pangenome) (Kislyuk et al. 2011).
However, the script has a default maximum number of subsamples set to 250,000 for each N genomes sampled.
This can be altered with the -max_sub / --max_subsamples flag or turned off with the --max_off flag.
Turning --max_off on forces calculations to be done on all possible subsample combinations
of N genomes. For samples of N genomes that were stopped at the max number of subsamples, the subsamples
are sampled WITH replacement and variance is calculated with delta degrees of freedom = 1 (i.e. n - 1).
Results are a text file of fluidity, variance, and standard error for all N genome samples,
and a figure of pangenome fluidity with shaded regions showing total standard error and an
exponential regression fit.
Notes
1. This will only work if you have at least 5 isolates to make up your pangenome.
2. If you have 5 isolates your graph will probably not look pretty as it's difficult
to fit with such a low number of samples.
'''
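# For reference (our reading of Kislyuk et al. 2011, not part of the original script):
# genome fluidity for N genomes is the average ratio of unique gene clusters to total
# gene clusters over all genome pairs (k, l):
#   fluidity = 2 / (N * (N - 1)) * sum over pairs (k, l) of (U_k + U_l) / (M_k + M_l)
# where U_k and U_l are the clusters unique to each genome of the pair and M_k, M_l are
# the total clusters in each genome. create_pair_dictionary() below precomputes the
# per-pair ratios and compute_fluidity_all_genomes() averages them.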
import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from itertools import combinations
from collections import OrderedDict
from collections.abc import Iterable
from scipy.optimize import curve_fit, differential_evolution
rundir = os.getcwd()
class MyFormatter(argparse.RawTextHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(
usage='./%(prog)s [options] -i orthogroups -o output_folder',
description = ''' Performs multiple bootstraps and calculates genome fluidity
from a pangenome dataset (orthogroups).''',
epilog = """Written by Stephen A. Wyka (2019)""",
formatter_class = MyFormatter)
parser.add_argument(
'-i',
'--input',
required = True,
help = 'Orthogroups file, see format in READ.me',
metavar=''
)
parser.add_argument(
'-o',
'--out',
required = True,
help = 'Output folder',
metavar=''
)
parser.add_argument(
'-c',
'--cpus',
type=int,
default=1,
help = 'Number of cores to use for multiprocessing [default: 1]',
metavar=''
)
parser.add_argument(
'-max_sub',
'--max_subsamples',
type=int,
default=250000,
help = 'Max number of subsamples to run on N genomes sampled. [default: 250000]',
metavar=''
)
parser.add_argument(
'--max_off',
action='store_true',
    help = 'Turn off the max subsamples. This will cause the script to sample ALL possible combinations '\
    'for N genomes',
)
parser.add_argument(
'-p',
'--prefix',
help = 'Prefix to append to the result files (such as Genus, species, etc.)',
metavar=''
)
args=parser.parse_args()
if not os.path.isdir(args.out):
os.makedirs(os.path.join(args.out))
result_dir = os.path.abspath(os.path.join(rundir, args.out))
if args.input:
input_file = os.path.abspath(args.input)
else:
    print('ERROR: No orthogroups file was provided, please provide one with -i or --input')
sys.exit()
if args.prefix:
fluid_results = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.png'))
else:
fluid_results = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.png'))
def create_ortho_dictionary(ortho_file): # create dictionary of gene clusters and isolates per cluster
    '''Generate dictionary of Orthogroups.'''
print('Creating ortholog dictionary')
ortho_isolates_dict = OrderedDict() # {Protein Cluster : list of isolates represented in cluster}
with open(ortho_file, 'r') as infile:
ortho_list = [item.strip() for item in sorted(infile)]
for line in ortho_list:
iso_list = []
if ':' in line:
cluster, genes = line.split(':')
elif '\t' in line:
cluster, genes = line.split('\t', 1)
else:
cluster, genes = line.split(' ', 1)
for match in re.finditer(r'([^\s]+)', genes):
isolate = match.group(0).split('_')[0]
iso_list.append(isolate)
ortho_isolates_dict[cluster] = list(set(iso_list))
return ortho_isolates_dict
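# Illustrative input line for the parser above (a hypothetical example; see the
# repository README for the authoritative format). Isolate names are taken from the
# text before the first underscore of each gene ID, so a line such as
#   OG0000001: IsoA_gene001 IsoB_gene017 IsoC_gene101
# maps cluster OG0000001 to the isolates IsoA, IsoB, and IsoC.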
def create_pair_dictionary(ortho_dictionary):
'''Create all possible unique pairs of isolates and get their unique
sum gene clusters.'''
print('Creating dictionary of paired ratio values')
pair_dict = {} # {(Isolate1, Isolate2) : [ratio of sum(unique clusters)/sum(all clusters)]}
for i in range(0, len(iso_list)):
for x in range(0, len(iso_list)):
if not iso_list[i] == iso_list[x]:
pair = tuple(sorted([iso_list[i], iso_list[x]]))
if not pair in pair_dict.keys():
cogs = {'Shared' : 0, 'Uk' : 0, 'Ul' : 0}
for k,v in ortho_dictionary.items():
if pair[0] in v and pair[1] in v:
cogs['Shared'] += 1
elif pair[0] in v and pair[1] not in v:
cogs['Uk'] += 1
elif pair[0] not in v and pair[1] in v:
cogs['Ul'] += 1
else:
pass # don't need to count a cluster if both isolates are not present
unique_pair = cogs['Uk'] + cogs['Ul']
all_pair = (cogs['Uk'] + cogs['Shared']) + (cogs['Ul'] + cogs['Shared'])
pair_dict[pair] = unique_pair/all_pair
return pair_dict
def compute_fluidity_all_genomes():
'''
Computes the fluidity and variance for the pangenome in question from the max number
of genomes in the pangenome.
'''
N = iso_num
fluidity_list = [ratio for ratio in pair_dict.values()] # list of ratios
pangenome_fluidity = (2/(N*(N-1)))*sum(fluidity_list) # get fluidity from average of all ratios
jack_samples = list(combinations(iso_list, N - 1)) # get list of all combos of N-1 from max num of genomes
fluidity_i_list = []
for sample in jack_samples:
jack_pairs = tuple(combinations(sample,2)) # get all pairs from current jackknife sample
jack_sample_fluidity = [pair_dict[tuple(sorted(p))] for p in jack_pairs] # get ratios from pair_dict
fluidity_i = (2/((N-1)*(N-2)))*sum(jack_sample_fluidity) # calculate fluidity_i
fluidity_i_list.append(fluidity_i)
fluidity_i_mean = np.mean(fluidity_i_list) # calculate fluidity_i_mean from all fluidity_i's
fluidity_variance = ((N-1)/N)*sum([(i-fluidity_i_mean)**2 for i in fluidity_i_list]) # calculate variance
return pangenome_fluidity, fluidity_variance
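# Note (assumption about intent, inferred from the code above): fluidity_variance is a
# jackknife (leave-one-out) estimate -- each fluidity_i recomputes fluidity with one
# genome removed, and the (N-1)/N scaling is the standard jackknife variance factor.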
def subsample_multiprocess(combo_list):
'''
    Takes portions of the full combo_list and runs them on separate processes for faster processing.
    Calculates fluidity for each sample and returns a list of fluidities.
'''
N = len(combo_list[0]) # get N from number of genomes present
sample_process_list = []
for sample in combo_list:
pairs = tuple(combinations(sample,2))
pair_fluidity_list = [pair_dict[tuple(sorted(p))] for p in pairs]
sample_fluidity = (2/(N*(N-1)))*sum(pair_fluidity_list)
sample_process_list.append(sample_fluidity)
return sample_process_list
def genome_subsamples_fluidities(perm_list):
'''
Compute fluidities from all possible combinations of genomes from 3 to N randomly sampled genomes
    (N is the max number of genomes in the sample, so it is only sampled once). Has a cutoff of max subsamples,
    at which point variances are calculated as sample variances (n-1) instead of full population
variances.
'''
sub_fluid_dict = {} # {N genomes sampled : [list of fluidities from subsamples]}
for N in range(3, iso_num + 1):
sub_fluid_dict[N] = []
N_combos = list(combinations(iso_list, N))
if args.max_off:
combos = N_combos
else:
if len(N_combos) > args.max_subsamples:
combos = random.choices(N_combos, k=args.max_subsamples)
perm_list.append(N)
else:
combos = N_combos
print('Performing fluidity calculations on {} subsample combinations of {} genomes'.format(len(combos),N))
if not len(N_combos) == 1:
chunk = round(len(combos)/args.cpus)
split_combos = [combos[i:i + chunk] for i in range(0, len(combos), chunk)]
pool = Pool(processes=args.cpus)
results = pool.imap(subsample_multiprocess, split_combos)
pool.close()
pool.join()
sub_fluid_dict[N].append(results)
else:
last_run = subsample_multiprocess(N_combos)
sub_fluid_dict[N].append(last_run)
sub_fluid_dict[N]=list(flatten(sub_fluid_dict[N]))
print(len(sub_fluid_dict[N]))
return sub_fluid_dict
def flatten(lis):
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
def exponential(x, a, b, c):
return a * np.exp(b * x) + c
def neg_exponential(x, a, b, c):
return a * np.exp(-b * x) + c
def sumOfSquaredError(parameterTuple, x_values, y_curve_values, func):
warnings.filterwarnings("ignore") # do not print warnings by genetic algorithm
val = func(x_values, *parameterTuple)
return np.sum((y_curve_values - val) ** 2.0)
def generate_Initial_Parameters(x_values, y_curve_values, func):
# min and max used for bounds
maxX = max(x_values)
minX = min(x_values)
maxY = max(y_curve_values)
minY = min(y_curve_values)
maxXY = max(maxX, maxY)
parameterBounds = []
    parameterBounds.append([-maxXY, maxXY]) # search bounds for a
    parameterBounds.append([-maxXY, maxXY]) # search bounds for b
    parameterBounds.append([-maxXY, maxXY]) # search bounds for c
# "seed" the numpy random number generator for repeatable results
result = differential_evolution(sumOfSquaredError, parameterBounds, args=(x_values,y_curve_values, func), seed=3)
return result.x
def create_fluidity_results(figure_output, results_output):
total_variance = []
for i in range(3, iso_num + 1):
if i in permutation_list:
total_variance.append(np.var(sub_fluid_dict[i], ddof = 1) + pan_variance)
else:
total_variance.append(np.var(sub_fluid_dict[i]) + pan_variance)
total_variance = np.array(total_variance)
total_stderr = np.array([x**(1/2) for x in total_variance])
y_fluidity_values = np.array([pan_fluidity for i in range(3, iso_num + 1)])
x_labels = np.array([i for i in range(3, iso_num + 1)])
stderr_bottom = np.array([(pan_fluidity - v) for v in total_stderr])
stderr_top = np.array([(pan_fluidity + v) for v in total_stderr])
fig, ax = plt.subplots()
try: # Still had problems sometimes with fitting curves, this solution works best for now
geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, exponential)
geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, exponential)
popt_t, pcov = curve_fit(exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
popt_b, pcov = curve_fit(exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
if len(set(exponential(x_labels, *popt_t))) > 3 and len(set(exponential(x_labels, *popt_b))) > 3:
plt.fill_between(x_labels, exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = exponential(x_labels, *popt_t)
bottom_curve = exponential(x_labels, *popt_b)
if len(set(exponential(x_labels, *popt_t))) <= 3:
geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, neg_exponential)
popt_t, pcov = curve_fit(neg_exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
plt.fill_between(x_labels, neg_exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = neg_exponential(x_labels, *popt_t)
bottom_curve = exponential(x_labels, *popt_b)
else:
pass
if len(set(exponential(x_labels, *popt_b))) <= 3:
geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, neg_exponential)
popt_b, pcov = curve_fit(neg_exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
plt.fill_between(x_labels, exponential(x_labels, *popt_t), neg_exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = exponential(x_labels, *popt_t)
bottom_curve = neg_exponential(x_labels, *popt_b)
else:
pass
except:
pass
ax.set_axisbelow(True)
plt.minorticks_on()
plt.grid(which='minor', axis='y', color='white', linestyle='--', alpha=0.3)
ax.yaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white')
ax.xaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white', alpha=0.5)
ax.tick_params(axis='x', which='minor', bottom=False)
ax.set_facecolor('gainsboro')
plt.plot(x_labels, y_fluidity_values, ls='--', lw=1, color='black') # plot y-values of fluidity
plt.xticks(np.arange(x_labels[0], x_labels[len(x_labels)-1]+1, 1.0)) # make sure x interval is 1
plt.xlim(x_labels[0], x_labels[len(x_labels)-1]) # adjust x limit so it starts with 3 at 0
max_y = max(stderr_top)
min_y = min(stderr_bottom)
plt.ylim((min_y - min_y*0.15), (max_y + max_y*0.15))
plt.xlabel('Number of genomes sampled')
plt.ylabel('Fluidity, '+u'\u03C6')
plt.tight_layout()
plt.savefig(figure_output)
with open(results_output, 'w') as results: # print out fluidity results
results.write('Genomes_Sampled\tFluidity\tTotal_Variance\tTotal_Stderr\tExponential_top\tExponential_bottom\n')
r_out = []
for i in range(0, iso_num-2):
r_out.append([str(i+3), str(pan_fluidity), str(total_variance[i]), str(total_stderr[i]),
str(top_curve[i]), str(bottom_curve[i])])
for line in r_out:
results.write('\t'.join(line) + '\n')
if __name__ == "__main__":
ortho_dict = create_ortho_dictionary(input_file)
iso_num = max([len(v) for v in ortho_dict.values()])
iso_list = list(set(itertools.chain.from_iterable([v for v in ortho_dict.values() if len(v) == iso_num])))
pair_dict = create_pair_dictionary(ortho_dict)
pan_results = compute_fluidity_all_genomes()
pan_fluidity = pan_results[0]
pan_variance = pan_results[1]
permutation_list = []
sub_fluid_dict = genome_subsamples_fluidities(permutation_list)
create_fluidity_results(fluid_fig, fluid_results)
| [((34, 9, 34, 20), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((39, 9, 45, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((91, 7, 91, 30), 'os.path.isdir', 'os.path.isdir', ({(91, 21, 91, 29): 'args.out'}, {}), '(args.out)', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((93, 29, 93, 59), 'os.path.join', 'os.path.join', ({(93, 42, 93, 48): 'rundir', (93, 50, 93, 58): 'args.out'}, {}), '(rundir, args.out)', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((96, 17, 96, 44), 'os.path.abspath', 'os.path.abspath', ({(96, 33, 96, 43): 'args.input'}, {}), '(args.input)', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((99, 4, 99, 14), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((111, 26, 111, 39), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((168, 22, 168, 46), 'numpy.mean', 'np.mean', ({(168, 30, 168, 45): 'fluidity_i_list'}, {}), '(fluidity_i_list)', True, 'import numpy as np\n'), ((236, 4, 236, 37), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(236, 28, 236, 36): '"""ignore"""'}, {}), "('ignore')", False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((238, 11, 238, 48), 'numpy.sum', 'np.sum', ({(238, 18, 238, 47): '((y_curve_values - val) ** 2.0)'}, {}), '((y_curve_values - val) ** 2.0)', True, 'import numpy as np\n'), ((253, 13, 253, 117), 'scipy.optimize.differential_evolution', 'differential_evolution', (), '', False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((263, 21, 263, 45), 'numpy.array', 'np.array', ({(263, 30, 263, 44): 'total_variance'}, {}), '(total_variance)', True, 'import numpy as np\n'), ((264, 19, 264, 63), 'numpy.array', 'np.array', ({(264, 28, 264, 62): '[(x ** (1 / 2)) for x in total_variance]'}, {}), '([(x ** (1 / 2)) for x in total_variance])', True, 'import numpy as np\n'), ((267, 20, 267, 72), 'numpy.array', 'np.array', ({(267, 29, 267, 71): '[(pan_fluidity - v) for v in total_stderr]'}, {}), '([(pan_fluidity - v) for v in total_stderr])', True, 'import numpy as np\n'), ((268, 17, 268, 69), 'numpy.array', 'np.array', ({(268, 26, 268, 68): '[(pan_fluidity + v) for v in total_stderr]'}, {}), '([(pan_fluidity + v) for v in total_stderr])', True, 'import numpy as np\n'), ((269, 14, 269, 28), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((298, 4, 298, 23), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((299, 4, 299, 79), 'matplotlib.pyplot.grid', 'plt.grid', (), '', True, 'import matplotlib.pyplot as plt\n'), ((304, 4, 304, 71), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((309, 4, 309, 56), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(309, 14, 309, 32): '(min_y - min_y * 0.15)', (309, 36, 309, 54): '(max_y + max_y * 0.15)'}, {}), '(min_y - min_y * 0.15, max_y + max_y * 0.15)', True, 'import matplotlib.pyplot as plt\n'), ((310, 4, 310, 43), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(310, 15, 310, 42): '"""Number of genomes sampled"""'}, {}), "('Number of 
genomes sampled')", True, 'import matplotlib.pyplot as plt\n'), ((311, 4, 311, 38), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(311, 15, 311, 37): "('Fluidity, ' + u'φ')"}, {}), "('Fluidity, ' + u'φ')", True, 'import matplotlib.pyplot as plt\n'), ((312, 4, 312, 22), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((313, 4, 313, 30), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(313, 16, 313, 29): 'figure_output'}, {}), '(figure_output)', True, 'import matplotlib.pyplot as plt\n'), ((92, 16, 92, 38), 'os.path.join', 'os.path.join', ({(92, 29, 92, 37): 'args.out'}, {}), '(args.out)', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((102, 36, 102, 89), 'os.path.join', 'os.path.join', ({(102, 49, 102, 59): 'result_dir', (102, 61, 102, 88): "args.prefix + '_fluidity.txt'"}, {}), "(result_dir, args.prefix + '_fluidity.txt')", False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((103, 32, 103, 85), 'os.path.join', 'os.path.join', ({(103, 45, 103, 55): 'result_dir', (103, 57, 103, 84): "args.prefix + '_fluidity.png'"}, {}), "(result_dir, args.prefix + '_fluidity.png')", False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((105, 36, 105, 86), 'os.path.join', 'os.path.join', ({(105, 49, 105, 59): 'result_dir', (105, 61, 105, 85): '"""Pangenome_fluidity.txt"""'}, {}), "(result_dir, 'Pangenome_fluidity.txt')", False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((106, 32, 106, 82), 'os.path.join', 'os.path.join', ({(106, 45, 106, 55): 'result_dir', (106, 57, 106, 81): '"""Pangenome_fluidity.png"""'}, {}), "(result_dir, 'Pangenome_fluidity.png')", False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((161, 24, 161, 53), 'itertools.combinations', 'combinations', ({(161, 37, 161, 45): 'iso_list', (161, 47, 161, 52): 'N - 1'}, {}), '(iso_list, N - 1)', False, 'from itertools import combinations\n'), ((273, 23, 273, 104), 'scipy.optimize.curve_fit', 'curve_fit', (), '', False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((274, 23, 274, 110), 'scipy.optimize.curve_fit', 'curve_fit', (), '', False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((122, 25, 122, 56), 're.finditer', 're.finditer', ({(122, 37, 122, 48): '"""([^\\\\s]+)"""', (122, 50, 122, 55): 'genes'}, {}), "('([^\\\\s]+)', genes)", False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((164, 27, 164, 49), 'itertools.combinations', 'combinations', ({(164, 40, 164, 46): 'sample', (164, 47, 164, 48): '2'}, {}), '(sample, 2)', False, 'from itertools import combinations\n'), ((180, 22, 180, 44), 'itertools.combinations', 'combinations', ({(180, 35, 180, 41): 'sample', (180, 42, 180, 43): '2'}, {}), '(sample, 2)', False, 'from itertools import combinations\n'), ((196, 24, 196, 49), 'itertools.combinations', 'combinations', ({(196, 37, 196, 45): 'iso_list', (196, 47, 196, 48): 'N'}, {}), '(iso_list, N)', False, 'from itertools import combinations\n'), ((209, 19, 209, 44), 'multiprocessing.Pool', 'Pool', (), '', False, 'from multiprocessing import Pool\n'), ((230, 15, 230, 28), 'numpy.exp', 'np.exp', ({(230, 22, 230, 27): '(b * x)'}, {}), '(b * x)', True, 'import numpy as np\n'), ((233, 15, 233, 29), 'numpy.exp', 'np.exp', ({(233, 22, 233, 28): '(-b * x)'}, {}), '(-b * x)', True, 'import numpy as np\n'), ((281, 27, 
281, 112), 'scipy.optimize.curve_fit', 'curve_fit', (), '', False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((289, 27, 289, 118), 'scipy.optimize.curve_fit', 'curve_fit', (), '', False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((201, 25, 201, 72), 'random.choices', 'random.choices', (), '', False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((260, 34, 260, 69), 'numpy.var', 'np.var', (), '', True, 'import numpy as np\n'), ((262, 34, 262, 59), 'numpy.var', 'np.var', ({(262, 41, 262, 58): 'sub_fluid_dict[i]'}, {}), '(sub_fluid_dict[i])', True, 'import numpy as np\n')] |
CCSGroupInternational/osvolbackup | osvolbackup/backup.py | d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5 | #
# This module provides the BackupGroup class that encapsulates some complex server-instance backup and restore operations
#
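# Typical usage (a sketch inferred from the BackupGroup class below, not documented
# upstream; names and values are illustrative):
#   group = BackupGroup('my-server')          # finds backups named osvb_<instance id>
#   group.select_by_tag('last')               # selects the most recent backup set
#   group.restore(network='private=10.0.0.5', to_project='target-project')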
from __future__ import print_function
from json import loads
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from cinderclient import client as cinder_client
from osvolbackup.server import ServerInstance, ServerNotFound
from osvolbackup.osauth import get_session, VERSION
from osvolbackup.verbose import vprint
from time import time, sleep
class BackupGroup(object):
max_secs_gbi = 300
poll_delay = 10
def __init__(self, serverName):
self.selected_metadata = None
self.selected_backups = []
self.selected_volumes = []
session = self.session = get_session()
self.neutron = neutron_client.Client(session=session)
self.nova = nova_client.Client(VERSION, session=session)
self.cinder = cinder_client.Client(VERSION, session=session)
try:
server = ServerInstance(serverName)
except ServerNotFound:
name = 'osvb_'+serverName
else:
name = 'osvb_'+server.instance.id
self.backup_list = self.cinder.backups.list(search_opts={"name": name})
self.volume_map = {}
if len(self.backup_list) == 0:
raise BackupNotFound(serverName)
# Load metadata from the backup description field
self.backup_meta_data = backup_meta_data = {}
for backup in self.backup_list:
meta_data = loads(backup.description)
backup_meta_data[backup.id] = meta_data
self.volume_map[backup.id] = {"id": backup.volume_id, "size": backup.size}
self.available_backups = sorted(set([b['backup_time'] for b in backup_meta_data.values()]))
def select_by_tag(self, tag):
if tag == 'last':
selected_backup_timestamp = self.available_backups[-1]
else:
raise BackupTooMany(tag)
# Get volumes associated with the selected backup
for backup_id, backup_meta in self.backup_meta_data.iteritems():
if backup_meta['backup_time'] == selected_backup_timestamp:
self.selected_backups.append(backup_id)
self.selected_volumes.append(self.volume_map[backup_id])
self.selected_metadata = backup_meta
def get_volumes(self):
return self.selected_volumes
def restore(self, server=None, network=None, to_project=None, skip_vm=False):
# flavor = self.nova.flavors.find(name=self.selected_metadata['flavor'])
new_volume_list = self._create_volumes(self.selected_volumes, to_project)
# Restore the volumes
block_device_mapping = {}
for i, backup_id in enumerate(self.selected_backups):
vol_index = self.backup_meta_data[backup_id]['vol_index']
new_volume_id = new_volume_list[i].id
vprint("Restoring from backup", backup_id, "to volume", new_volume_id)
dev_name = "vd" + chr(ord('a') + vol_index)
block_device_mapping[dev_name] = new_volume_id
restore = self.cinder.restores.restore(backup_id=backup_id, volume_id=new_volume_id)
restored_volume = self.cinder.volumes.get(restore.volume_id)
self._wait_for(restored_volume, ('restoring-backup',), 'available')
# We need to get again to refresh the metadata
restored_volume = self.cinder.volumes.get(restore.volume_id)
if vol_index == 0:
if not skip_vm:
name = restored_volume.metadata['osvb_name']
flavor = restored_volume.metadata['osvb_flavor']
flavor = self.nova.flavors.find(name=flavor) # name to id
saved_networks = loads(restored_volume.metadata['osvb_network'])
if not skip_vm:
nics = []
if network is not None:
net_name, net_ip = network.split("=")
net_id = self.neutron.list_networks(name=net_name)['networks'][0]['id']
nic_info = {'net-id': net_id, 'v4-fixed-ip': net_ip}
nics.append(nic_info)
else:
for network_name, network_ips in saved_networks.iteritems():
nic_info = {}
nic_info['net-id'] = self.neutron.list_networks(name=network_name)['networks'][0]['id']
nic_info['v4-fixed-ip'] = network_ips[0]
nics.append(nic_info)
target_session = get_session(to_project)
target_nova = nova_client.Client(VERSION, session=target_session)
server = target_nova.servers.create(
name=name, image=None, flavor=flavor, block_device_mapping=block_device_mapping, nics=nics
)
print("Server was restored into instance", server.id)
def _create_volumes(self, volume_list, to_project):
""" Create volumes based """
vprint("Creating volumes for the instance restore")
target_session = get_session(to_project)
target_cinder = cinder_client.Client(VERSION, session=target_session)
vol_list = []
for volume in volume_list:
vprint("Creating %dG volume" % volume['size'])
new_volume = target_cinder.volumes.create(volume['size'])
self._wait_for(new_volume, ('creating',), 'available')
vol_list.append(new_volume)
return vol_list
# Borrowed from https://github.com/Akrog/cinderback/blob/master/cinderback.py
def _wait_for(self, resource, allowed_states, expected_states=None, timeout=None):
"""Waits for a resource to come to a specific state.
:param resource: Resource we want to wait for
:param allowed_states: iterator with allowed intermediary states
:param expected_states: states we expect to have at the end, if None
is supplied then anything is good.
        :param timeout: maximum seconds to wait; if None, an estimate based on
                        the resource size (self.max_secs_gbi * size) is used
:return: The most updated resource
"""
if timeout:
deadline = time() + timeout
else:
deadline = time() + (self.max_secs_gbi * resource.size)
while resource.status in allowed_states:
sleep(self.poll_delay)
if deadline <= time():
raise TimeoutError(what=resource)
resource = resource.manager.get(resource.id)
if expected_states and resource.status not in expected_states:
raise UnexpectedStatus(what=resource, intermediate=allowed_states, final=expected_states)
return resource
class BackupException(Exception):
def __init__(self, what, *args, **kwargs):
super(BackupException, self).__init__(*args, **kwargs)
self.what = what
def __str__(self):
return u'%s: %s' % (self.__class__.__name__, self.what)
class UnexpectedStatus(BackupException):
def __init__(self, what, intermediate='', final='', *args, **kwargs):
super(UnexpectedStatus, self).__init__(what, *args, **kwargs)
self.intermediate = intermediate
self.final = final
def __str__(self):
if self.intermediate or self.final:
steps = (' [intermediate: %s, final: %s]' % (self.intermediate, self.final))
else:
steps = ''
return (u'%s: Status is %s%s' %
(self.__class__.__name__, self.what.status, steps))
class BackupNotFound(BackupException):
pass
class BackupTooMany(BackupException):
pass
| [((25, 33, 25, 46), 'osvolbackup.osauth.get_session', 'get_session', ({}, {}), '()', False, 'from osvolbackup.osauth import get_session, VERSION\n'), ((26, 23, 26, 61), 'neutronclient.v2_0.client.Client', 'neutron_client.Client', (), '', True, 'from neutronclient.v2_0 import client as neutron_client\n'), ((27, 20, 27, 64), 'novaclient.client.Client', 'nova_client.Client', (), '', True, 'from novaclient import client as nova_client\n'), ((28, 22, 28, 68), 'cinderclient.client.Client', 'cinder_client.Client', (), '', True, 'from cinderclient import client as cinder_client\n'), ((113, 8, 113, 59), 'osvolbackup.verbose.vprint', 'vprint', ({(113, 15, 113, 58): '"""Creating volumes for the instance restore"""'}, {}), "('Creating volumes for the instance restore')", False, 'from osvolbackup.verbose import vprint\n'), ((114, 25, 114, 48), 'osvolbackup.osauth.get_session', 'get_session', ({(114, 37, 114, 47): 'to_project'}, {}), '(to_project)', False, 'from osvolbackup.osauth import get_session, VERSION\n'), ((115, 24, 115, 77), 'cinderclient.client.Client', 'cinder_client.Client', (), '', True, 'from cinderclient import client as cinder_client\n'), ((30, 21, 30, 47), 'osvolbackup.server.ServerInstance', 'ServerInstance', ({(30, 36, 30, 46): 'serverName'}, {}), '(serverName)', False, 'from osvolbackup.server import ServerInstance, ServerNotFound\n'), ((44, 24, 44, 49), 'json.loads', 'loads', ({(44, 30, 44, 48): 'backup.description'}, {}), '(backup.description)', False, 'from json import loads\n'), ((76, 12, 76, 82), 'osvolbackup.verbose.vprint', 'vprint', ({(76, 19, 76, 42): '"""Restoring from backup"""', (76, 44, 76, 53): 'backup_id', (76, 55, 76, 66): '"""to volume"""', (76, 68, 76, 81): 'new_volume_id'}, {}), "('Restoring from backup', backup_id, 'to volume', new_volume_id)", False, 'from osvolbackup.verbose import vprint\n'), ((104, 29, 104, 52), 'osvolbackup.osauth.get_session', 'get_session', ({(104, 41, 104, 51): 'to_project'}, {}), '(to_project)', False, 'from osvolbackup.osauth import get_session, VERSION\n'), ((105, 26, 105, 77), 'novaclient.client.Client', 'nova_client.Client', (), '', True, 'from novaclient import client as nova_client\n'), ((118, 12, 118, 58), 'osvolbackup.verbose.vprint', 'vprint', ({(118, 19, 118, 57): "('Creating %dG volume' % volume['size'])"}, {}), "('Creating %dG volume' % volume['size'])", False, 'from osvolbackup.verbose import vprint\n'), ((139, 12, 139, 34), 'time.sleep', 'sleep', ({(139, 18, 139, 33): 'self.poll_delay'}, {}), '(self.poll_delay)', False, 'from time import time, sleep\n'), ((135, 23, 135, 29), 'time.time', 'time', ({}, {}), '()', False, 'from time import time, sleep\n'), ((137, 23, 137, 29), 'time.time', 'time', ({}, {}), '()', False, 'from time import time, sleep\n'), ((140, 27, 140, 33), 'time.time', 'time', ({}, {}), '()', False, 'from time import time, sleep\n'), ((90, 37, 90, 84), 'json.loads', 'loads', ({(90, 43, 90, 83): "restored_volume.metadata['osvb_network']"}, {}), "(restored_volume.metadata['osvb_network'])", False, 'from json import loads\n')] |
JohannesBuchner/gammapy | gammapy/estimators/profile.py | 48769519f04b7df7b3e4580ebb61396445790bc3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
import numpy as np
import scipy.ndimage
from astropy import units as u
from astropy.convolution import Box1DKernel, Gaussian1DKernel
from astropy.coordinates import Angle
from astropy.table import Table
from .core import Estimator
__all__ = ["ImageProfile", "ImageProfileEstimator"]
# TODO: implement measuring profile along arbitrary directions
# TODO: think better about error handling. e.g. MC based methods
class ImageProfileEstimator(Estimator):
"""Estimate profile from image.
Parameters
----------
x_edges : `~astropy.coordinates.Angle`
        Coordinate edges to define a custom measurement grid (optional).
method : ['sum', 'mean']
Compute sum or mean within profile bins.
axis : ['lon', 'lat', 'radial']
Along which axis to estimate the profile.
center : `~astropy.coordinates.SkyCoord`
Center coordinate for the radial profile option.
Examples
--------
This example shows how to compute a counts profile for the Fermi galactic
center region::
import matplotlib.pyplot as plt
        from gammapy.estimators import ImageProfileEstimator
from gammapy.maps import Map
from astropy import units as u
# load example data
filename = '$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz'
fermi_cts = Map.read(filename)
# set up profile estimator and run
p = ImageProfileEstimator(axis='lon', method='sum')
profile = p.run(fermi_cts)
# smooth profile and plot
smoothed = profile.smooth(kernel='gauss')
smoothed.peek()
plt.show()
"""
tag = "ImageProfileEstimator"
def __init__(self, x_edges=None, method="sum", axis="lon", center=None):
self._x_edges = x_edges
if method not in ["sum", "mean"]:
raise ValueError("Not a valid method, choose either 'sum' or 'mean'")
if axis not in ["lon", "lat", "radial"]:
raise ValueError("Not a valid axis, choose either 'lon' or 'lat'")
if method == "radial" and center is None:
raise ValueError("Please provide center coordinate for radial profiles")
self.parameters = {"method": method, "axis": axis, "center": center}
def _get_x_edges(self, image):
if self._x_edges is not None:
return self._x_edges
p = self.parameters
coordinates = image.geom.get_coord(mode="edges").skycoord
if p["axis"] == "lat":
x_edges = coordinates[:, 0].data.lat
elif p["axis"] == "lon":
lon = coordinates[0, :].data.lon
x_edges = lon.wrap_at("180d")
elif p["axis"] == "radial":
rad_step = image.geom.pixel_scales.mean()
corners = [0, 0, -1, -1], [0, -1, 0, -1]
rad_max = coordinates[corners].separation(p["center"]).max()
x_edges = Angle(np.arange(0, rad_max.deg, rad_step.deg), unit="deg")
return x_edges
def _estimate_profile(self, image, image_err, mask):
p = self.parameters
labels = self._label_image(image, mask)
profile_err = None
index = np.arange(1, len(self._get_x_edges(image)))
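        # Labels from _label_image are the 1-based bin indices returned by
        # np.digitize; label 0 is reserved for masked (background) pixels.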
if p["method"] == "sum":
profile = scipy.ndimage.sum(image.data, labels.data, index)
if image.unit.is_equivalent("counts"):
profile_err = np.sqrt(profile)
elif image_err:
# gaussian error propagation
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum)
elif p["method"] == "mean":
# gaussian error propagation
profile = scipy.ndimage.mean(image.data, labels.data, index)
if image_err:
N = scipy.ndimage.sum(~np.isnan(image_err.data), labels.data, index)
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum) / N
return profile, profile_err
def _label_image(self, image, mask=None):
p = self.parameters
coordinates = image.geom.get_coord().skycoord
x_edges = self._get_x_edges(image)
if p["axis"] == "lon":
lon = coordinates.data.lon.wrap_at("180d")
data = np.digitize(lon.degree, x_edges.deg)
elif p["axis"] == "lat":
lat = coordinates.data.lat
data = np.digitize(lat.degree, x_edges.deg)
elif p["axis"] == "radial":
separation = coordinates.separation(p["center"])
data = np.digitize(separation.degree, x_edges.deg)
if mask is not None:
# assign masked values to background
data[mask.data] = 0
return image.copy(data=data)
def run(self, image, image_err=None, mask=None):
"""Run image profile estimator.
Parameters
----------
image : `~gammapy.maps.Map`
Input image to run profile estimator on.
image_err : `~gammapy.maps.Map`
Input error image to run profile estimator on.
mask : `~gammapy.maps.Map`
Optional mask to exclude regions from the measurement.
Returns
-------
profile : `ImageProfile`
Result image profile object.
"""
p = self.parameters
if image.unit.is_equivalent("count"):
image_err = image.copy(data=np.sqrt(image.data))
profile, profile_err = self._estimate_profile(image, image_err, mask)
result = Table()
x_edges = self._get_x_edges(image)
result["x_min"] = x_edges[:-1]
result["x_max"] = x_edges[1:]
result["x_ref"] = (x_edges[:-1] + x_edges[1:]) / 2
result["profile"] = profile * image.unit
if profile_err is not None:
result["profile_err"] = profile_err * image.unit
result.meta["PROFILE_TYPE"] = p["axis"]
return ImageProfile(result)
class ImageProfile:
"""Image profile class.
The image profile data is stored in `~astropy.table.Table` object, with the
following columns:
* `x_ref` Coordinate bin center (required).
* `x_min` Coordinate bin minimum (optional).
* `x_max` Coordinate bin maximum (optional).
* `profile` Image profile data (required).
* `profile_err` Image profile data error (optional).
Parameters
----------
table : `~astropy.table.Table`
Table instance with the columns specified as above.
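    Examples
    --------
    A minimal profile table can be built by hand (sketch; values and units
    are arbitrary)::
        from astropy import units as u
        from astropy.table import Table
        table = Table()
        table["x_ref"] = [0.5, 1.5, 2.5] * u.deg
        table["profile"] = [42.0, 30.0, 12.0] * u.ct
        profile = ImageProfile(table)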
"""
def __init__(self, table):
self.table = table
def smooth(self, kernel="box", radius="0.1 deg", **kwargs):
r"""Smooth profile with error propagation.
Smoothing is described by a convolution:
.. math::
x_j = \sum_i x_{(j - i)} h_i
Where :math:`h_i` are the coefficients of the convolution kernel.
The corresponding error on :math:`x_j` is then estimated using Gaussian
error propagation, neglecting correlations between the individual
:math:`x_{(j - i)}`:
.. math::
\Delta x_j = \sqrt{\sum_i \Delta x^{2}_{(j - i)} h^{2}_i}
Parameters
----------
kernel : {'gauss', 'box'}
Kernel shape
radius : `~astropy.units.Quantity`, str or float
Smoothing width given as quantity or float. If a float is given it
is interpreted as smoothing width in pixels. If an (angular) quantity
            is given it is converted to pixels using `x_ref[1] - x_ref[0]`.
kwargs : dict
Keyword arguments passed to `~scipy.ndimage.uniform_filter`
('box') and `~scipy.ndimage.gaussian_filter` ('gauss').
Returns
-------
profile : `ImageProfile`
Smoothed image profile.
"""
table = self.table.copy()
profile = table["profile"]
radius = u.Quantity(radius)
radius = np.abs(radius / np.diff(self.x_ref))[0]
width = 2 * radius.value + 1
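        # e.g. a 0.2 deg radius with 0.1 deg wide bins gives width = 2 * 2 + 1 = 5 pixels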
if kernel == "box":
smoothed = scipy.ndimage.uniform_filter(
profile.astype("float"), width, **kwargs
)
# renormalize data
if table["profile"].unit.is_equivalent("count"):
smoothed *= int(width)
smoothed_err = np.sqrt(smoothed)
elif "profile_err" in table.colnames:
profile_err = table["profile_err"]
# use gaussian error propagation
box = Box1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, box.array ** 2)
smoothed_err = np.sqrt(err_sum)
elif kernel == "gauss":
smoothed = scipy.ndimage.gaussian_filter(
profile.astype("float"), width, **kwargs
)
# use gaussian error propagation
if "profile_err" in table.colnames:
profile_err = table["profile_err"]
gauss = Gaussian1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, gauss.array ** 2)
smoothed_err = np.sqrt(err_sum)
else:
raise ValueError("Not valid kernel choose either 'box' or 'gauss'")
table["profile"] = smoothed * self.table["profile"].unit
if "profile_err" in table.colnames:
table["profile_err"] = smoothed_err * self.table["profile"].unit
return self.__class__(table)
def plot(self, ax=None, **kwargs):
"""Plot image profile.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to `~matplotlib.axes.Axes.plot`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
x = self.x_ref.value
ax.plot(x, y, **kwargs)
ax.set_xlabel("lon")
ax.set_ylabel("profile")
ax.set_xlim(x.max(), x.min())
return ax
def plot_err(self, ax=None, **kwargs):
"""Plot image profile error as band.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to plt.fill_between()
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
ymin = y - self.table["profile_err"].data
ymax = y + self.table["profile_err"].data
x = self.x_ref.value
# plotting defaults
kwargs.setdefault("alpha", 0.5)
ax.fill_between(x, ymin, ymax, **kwargs)
ax.set_xlabel("x (deg)")
ax.set_ylabel("profile")
return ax
@property
def x_ref(self):
"""Reference x coordinates."""
return self.table["x_ref"].quantity
@property
def x_min(self):
"""Min. x coordinates."""
return self.table["x_min"].quantity
@property
def x_max(self):
"""Max. x coordinates."""
return self.table["x_max"].quantity
@property
def profile(self):
"""Image profile quantity."""
return self.table["profile"].quantity
@property
def profile_err(self):
"""Image profile error quantity."""
try:
return self.table["profile_err"].quantity
except KeyError:
return None
def peek(self, figsize=(8, 4.5), **kwargs):
"""Show image profile and error.
Parameters
----------
**kwargs : dict
Keyword arguments passed to `ImageProfile.plot_profile()`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax = self.plot(ax, **kwargs)
if "profile_err" in self.table.colnames:
ax = self.plot_err(ax, color=kwargs.get("c"))
return ax
def normalize(self, mode="peak"):
"""Normalize profile to peak value or integral.
Parameters
----------
mode : ['integral', 'peak']
Normalize image profile so that it integrates to unity ('integral')
or the maximum value corresponds to one ('peak').
Returns
-------
profile : `ImageProfile`
Normalized image profile.
"""
table = self.table.copy()
profile = self.table["profile"]
if mode == "peak":
norm = np.nanmax(profile)
elif mode == "integral":
norm = np.nansum(profile)
else:
raise ValueError(f"Invalid normalization mode: {mode!r}")
table["profile"] /= norm
if "profile_err" in table.colnames:
table["profile_err"] /= norm
return self.__class__(table)
| [((166, 17, 166, 24), 'astropy.table.Table', 'Table', ({}, {}), '()', False, 'from astropy.table import Table\n'), ((239, 17, 239, 35), 'astropy.units.Quantity', 'u.Quantity', ({(239, 28, 239, 34): 'radius'}, {}), '(radius)', True, 'from astropy import units as u\n'), ((379, 14, 379, 41), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((126, 19, 126, 55), 'numpy.digitize', 'np.digitize', ({(126, 31, 126, 41): 'lon.degree', (126, 43, 126, 54): 'x_edges.deg'}, {}), '(lon.degree, x_edges.deg)', True, 'import numpy as np\n'), ((293, 17, 293, 26), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((321, 17, 321, 26), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((405, 19, 405, 37), 'numpy.nanmax', 'np.nanmax', ({(405, 29, 405, 36): 'profile'}, {}), '(profile)', True, 'import numpy as np\n'), ((102, 30, 102, 46), 'numpy.sqrt', 'np.sqrt', ({(102, 38, 102, 45): 'profile'}, {}), '(profile)', True, 'import numpy as np\n'), ((130, 19, 130, 55), 'numpy.digitize', 'np.digitize', ({(130, 31, 130, 41): 'lat.degree', (130, 43, 130, 54): 'x_edges.deg'}, {}), '(lat.degree, x_edges.deg)', True, 'import numpy as np\n'), ((250, 31, 250, 48), 'numpy.sqrt', 'np.sqrt', ({(250, 39, 250, 47): 'smoothed'}, {}), '(smoothed)', True, 'import numpy as np\n'), ((407, 19, 407, 37), 'numpy.nansum', 'np.nansum', ({(407, 29, 407, 36): 'profile'}, {}), '(profile)', True, 'import numpy as np\n'), ((106, 30, 106, 46), 'numpy.sqrt', 'np.sqrt', ({(106, 38, 106, 45): 'err_sum'}, {}), '(err_sum)', True, 'import numpy as np\n'), ((134, 19, 134, 62), 'numpy.digitize', 'np.digitize', ({(134, 31, 134, 48): 'separation.degree', (134, 50, 134, 61): 'x_edges.deg'}, {}), '(separation.degree, x_edges.deg)', True, 'import numpy as np\n'), ((162, 40, 162, 59), 'numpy.sqrt', 'np.sqrt', ({(162, 48, 162, 58): 'image.data'}, {}), '(image.data)', True, 'import numpy as np\n'), ((240, 33, 240, 52), 'numpy.diff', 'np.diff', ({(240, 41, 240, 51): 'self.x_ref'}, {}), '(self.x_ref)', True, 'import numpy as np\n'), ((254, 22, 254, 40), 'astropy.convolution.Box1DKernel', 'Box1DKernel', ({(254, 34, 254, 39): 'width'}, {}), '(width)', False, 'from astropy.convolution import Box1DKernel, Gaussian1DKernel\n'), ((256, 31, 256, 47), 'numpy.sqrt', 'np.sqrt', ({(256, 39, 256, 46): 'err_sum'}, {}), '(err_sum)', True, 'import numpy as np\n'), ((264, 24, 264, 47), 'astropy.convolution.Gaussian1DKernel', 'Gaussian1DKernel', ({(264, 41, 264, 46): 'width'}, {}), '(width)', False, 'from astropy.convolution import Box1DKernel, Gaussian1DKernel\n'), ((266, 31, 266, 47), 'numpy.sqrt', 'np.sqrt', ({(266, 39, 266, 46): 'err_sum'}, {}), '(err_sum)', True, 'import numpy as np\n'), ((86, 28, 86, 67), 'numpy.arange', 'np.arange', ({(86, 38, 86, 39): '0', (86, 41, 86, 52): 'rad_max.deg', (86, 54, 86, 66): 'rad_step.deg'}, {}), '(0, rad_max.deg, rad_step.deg)', True, 'import numpy as np\n'), ((114, 30, 114, 46), 'numpy.sqrt', 'np.sqrt', ({(114, 38, 114, 45): 'err_sum'}, {}), '(err_sum)', True, 'import numpy as np\n'), ((112, 39, 112, 63), 'numpy.isnan', 'np.isnan', ({(112, 48, 112, 62): 'image_err.data'}, {}), '(image_err.data)', True, 'import numpy as np\n')] |
KATO-Hiro/AtCoder | ABC/abc001-abc050/abc007/b.py | cbbdb18e95110b604728a54aed83a6ed6b993fde | # -*- coding: utf-8 -*-
def main():
a = input()
# See:
# https://www.slideshare.net/chokudai/abc007
if a == 'a':
print('-1')
else:
print('a')
if __name__ == '__main__':
main()
| [] |
crimergio/linux_test | env/lib/python3.8/site-packages/versatileimagefield/mixins.py | 5e688a06884ab10b4eaaad10a5d0df417a1c9b31 | """versatileimagefield Field mixins."""
import os
import re
from .datastructures import FilterLibrary
from .registry import autodiscover, versatileimagefield_registry
from .settings import (
cache,
VERSATILEIMAGEFIELD_CREATE_ON_DEMAND,
VERSATILEIMAGEFIELD_SIZED_DIRNAME,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
from .validators import validate_ppoi
autodiscover()
filter_regex_snippet = r'__({registered_filters})__'.format(
registered_filters='|'.join([
key
for key, filter_cls in versatileimagefield_registry._filter_registry.items()
])
)
sizer_regex_snippet = r'-({registered_sizers})-(\d+)x(\d+)(?:-\d+)?'.format(
registered_sizers='|'.join([
sizer_cls.get_filename_key_regex()
for key, sizer_cls in versatileimagefield_registry._sizedimage_registry.items()
])
)
filter_regex = re.compile(filter_regex_snippet + '$')
sizer_regex = re.compile(sizer_regex_snippet + '$')
filter_and_sizer_regex = re.compile(
filter_regex_snippet + sizer_regex_snippet + '$'
)
class VersatileImageMixIn(object):
"""A mix-in that provides the filtering/sizing API."""
def __init__(self, *args, **kwargs):
"""Construct PPOI and create_on_demand."""
self._create_on_demand = VERSATILEIMAGEFIELD_CREATE_ON_DEMAND
super(VersatileImageMixIn, self).__init__(*args, **kwargs)
# Setting initial ppoi
if self.field.ppoi_field:
instance_ppoi_value = getattr(
self.instance,
self.field.ppoi_field,
(0.5, 0.5)
)
self.ppoi = instance_ppoi_value
else:
self.ppoi = (0.5, 0.5)
@property
def url(self):
"""
Return the appropriate URL.
URL is constructed based on these field conditions:
* If empty (not `self.name`) and a placeholder is defined, the
URL to the placeholder is returned.
* Otherwise, defaults to vanilla ImageFieldFile behavior.
"""
if not self.name and self.field.placeholder_image_name:
return self.storage.url(self.field.placeholder_image_name)
return super(VersatileImageMixIn, self).url
@property
def create_on_demand(self):
"""create_on_demand getter."""
return self._create_on_demand
@create_on_demand.setter
def create_on_demand(self, value):
if not isinstance(value, bool):
raise ValueError(
"`create_on_demand` must be a boolean"
)
else:
self._create_on_demand = value
self.build_filters_and_sizers(self.ppoi, value)
@property
def ppoi(self):
"""Primary Point of Interest (ppoi) getter."""
return self._ppoi_value
@ppoi.setter
def ppoi(self, value):
"""Primary Point of Interest (ppoi) setter."""
ppoi = validate_ppoi(
value,
return_converted_tuple=True
)
if ppoi is not False:
self._ppoi_value = ppoi
self.build_filters_and_sizers(ppoi, self.create_on_demand)
def build_filters_and_sizers(self, ppoi_value, create_on_demand):
"""Build the filters and sizers for a field."""
name = self.name
if not name and self.field.placeholder_image_name:
name = self.field.placeholder_image_name
self.filters = FilterLibrary(
name,
self.storage,
versatileimagefield_registry,
ppoi_value,
create_on_demand
)
for (
attr_name,
sizedimage_cls
) in versatileimagefield_registry._sizedimage_registry.items():
setattr(
self,
attr_name,
sizedimage_cls(
path_to_image=name,
storage=self.storage,
create_on_demand=create_on_demand,
ppoi=ppoi_value
)
)
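        # After this loop the file exposes one attribute per registered sizer,
        # e.g. instance.image.thumbnail['100x100'].url for a sizer registered
        # as 'thumbnail' (illustrative; names depend on the registry contents).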
def get_filtered_root_folder(self):
"""Return the location where filtered images are stored."""
folder, filename = os.path.split(self.name)
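        # e.g. a name of 'products/pic.jpg' with a filtered dirname setting of
        # '__filtered__' yields 'products/__filtered__/'.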
return os.path.join(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')
def get_sized_root_folder(self):
"""Return the location where sized images are stored."""
folder, filename = os.path.split(self.name)
return os.path.join(VERSATILEIMAGEFIELD_SIZED_DIRNAME, folder, '')
def get_filtered_sized_root_folder(self):
"""Return the location where filtered + sized images are stored."""
sized_root_folder = self.get_sized_root_folder()
return os.path.join(
sized_root_folder,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
def delete_matching_files_from_storage(self, root_folder, regex):
"""
Delete files in `root_folder` which match `regex` before file ext.
Example values:
* root_folder = 'foo/'
* self.name = 'bar.jpg'
* regex = re.compile('-baz')
Result:
* foo/bar-baz.jpg <- Deleted
* foo/bar-biz.jpg <- Not deleted
"""
if not self.name: # pragma: no cover
return
try:
directory_list, file_list = self.storage.listdir(root_folder)
except OSError: # pragma: no cover
pass
else:
folder, filename = os.path.split(self.name)
basename, ext = os.path.splitext(filename)
for f in file_list:
if not f.startswith(basename) or not f.endswith(ext): # pragma: no cover
continue
tag = f[len(basename):-len(ext)]
assert f == basename + tag + ext
if regex.match(tag) is not None:
file_location = os.path.join(root_folder, f)
self.storage.delete(file_location)
cache.delete(
self.storage.url(file_location)
)
print(
"Deleted {file} (created from: {original})".format(
file=os.path.join(root_folder, f),
original=self.name
)
)
def delete_filtered_images(self):
"""Delete all filtered images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_root_folder(),
filter_regex
)
def delete_sized_images(self):
"""Delete all sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_sized_root_folder(),
sizer_regex
)
def delete_filtered_sized_images(self):
"""Delete all filtered sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_sized_root_folder(),
filter_and_sizer_regex
)
def delete_all_created_images(self):
"""Delete all images created from `self.name`."""
self.delete_filtered_images()
self.delete_sized_images()
self.delete_filtered_sized_images()
| [((29, 15, 29, 53), 're.compile', 're.compile', ({(29, 26, 29, 52): "filter_regex_snippet + '$'"}, {}), "(filter_regex_snippet + '$')", False, 'import re\n'), ((30, 14, 30, 51), 're.compile', 're.compile', ({(30, 25, 30, 50): "sizer_regex_snippet + '$'"}, {}), "(sizer_regex_snippet + '$')", False, 'import re\n'), ((31, 25, 33, 1), 're.compile', 're.compile', ({(32, 4, 32, 52): "filter_regex_snippet + sizer_regex_snippet + '$'"}, {}), "(filter_regex_snippet + sizer_regex_snippet + '$')", False, 'import re\n'), ((129, 27, 129, 51), 'os.path.split', 'os.path.split', ({(129, 41, 129, 50): 'self.name'}, {}), '(self.name)', False, 'import os\n'), ((130, 15, 130, 77), 'os.path.join', 'os.path.join', ({(130, 28, 130, 34): 'folder', (130, 36, 130, 72): 'VERSATILEIMAGEFIELD_FILTERED_DIRNAME', (130, 74, 130, 76): '""""""'}, {}), "(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')", False, 'import os\n'), ((134, 27, 134, 51), 'os.path.split', 'os.path.split', ({(134, 41, 134, 50): 'self.name'}, {}), '(self.name)', False, 'import os\n'), ((135, 15, 135, 74), 'os.path.join', 'os.path.join', ({(135, 28, 135, 61): 'VERSATILEIMAGEFIELD_SIZED_DIRNAME', (135, 63, 135, 69): 'folder', (135, 71, 135, 73): '""""""'}, {}), "(VERSATILEIMAGEFIELD_SIZED_DIRNAME, folder, '')", False, 'import os\n'), ((140, 15, 143, 9), 'os.path.join', 'os.path.join', ({(141, 12, 141, 29): 'sized_root_folder', (142, 12, 142, 48): 'VERSATILEIMAGEFIELD_FILTERED_DIRNAME'}, {}), '(sized_root_folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME)', False, 'import os\n'), ((165, 31, 165, 55), 'os.path.split', 'os.path.split', ({(165, 45, 165, 54): 'self.name'}, {}), '(self.name)', False, 'import os\n'), ((166, 28, 166, 54), 'os.path.splitext', 'os.path.splitext', ({(166, 45, 166, 53): 'filename'}, {}), '(filename)', False, 'import os\n'), ((173, 36, 173, 64), 'os.path.join', 'os.path.join', ({(173, 49, 173, 60): 'root_folder', (173, 62, 173, 63): 'f'}, {}), '(root_folder, f)', False, 'import os\n'), ((180, 33, 180, 61), 'os.path.join', 'os.path.join', ({(180, 46, 180, 57): 'root_folder', (180, 59, 180, 60): 'f'}, {}), '(root_folder, f)', False, 'import os\n')] |
HanGuo97/federated | differential_privacy/run_federated.py | 7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9 | # Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training with differential privacy on various tasks."""
import functools
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from utils import task_utils
from utils import training_utils
from utils import utils_impl
from utils.optimizers import optimizer_utils
with utils_impl.record_hparam_flags() as optimizer_flags:
# Defining optimizer flags
optimizer_utils.define_optimizer_flags('client')
optimizer_utils.define_optimizer_flags('server')
with utils_impl.record_hparam_flags() as shared_flags:
# Federated training hyperparameters
flags.DEFINE_integer('client_epochs_per_round', 1,
'Number of epochs in the client to take per round.')
flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer('client_datasets_random_seed', 1,
'Random seed for client sampling.')
flags.DEFINE_integer(
'max_elements_per_client', None, 'Maximum number of '
'elements for each training client. If set to None, all '
'available examples are used.')
# Training loop configuration
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_string(
'experiment_name', None, 'The name of this experiment. Will be append to '
'--root_output_dir to separate experiment results.')
flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
'Root directory for writing experiment output.')
flags.DEFINE_integer(
'rounds_per_eval', 1,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer(
      'num_validation_examples', -1, 'The number of validation '
'examples to use. If set to -1, all available examples '
'are used.')
flags.DEFINE_integer('rounds_per_checkpoint', 50,
'How often to checkpoint the global model.')
with utils_impl.record_hparam_flags() as dp_flags:
# Differential privacy flags
flags.DEFINE_float(
'clip', None, 'Clip value for fixed clipping or initial clip for '
'adaptive clipping. If None, no clipping is used.')
flags.DEFINE_float('noise_multiplier', None,
'Noise multiplier. If None, non-DP aggregator is used.')
flags.DEFINE_float(
'adaptive_clip_learning_rate', None, 'Adaptive clip learning rate. If '
'None, clip adaptation is not used.')
flags.DEFINE_float('target_unclipped_quantile', 0.5,
'Target unclipped quantile.')
flags.DEFINE_boolean('uniform_weighting', False,
'Whether to weigh clients uniformly.')
# Task specification
with utils_impl.record_hparam_flags() as task_flags:
task_utils.define_task_flags()
FLAGS = flags.FLAGS
def _write_hparam_flags():
"""Returns an ordered dictionary of pertinent hyperparameter flags."""
hparam_dict = utils_impl.lookup_flag_values(shared_flags)
# Update with optimizer flags corresponding to the chosen optimizers.
opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)
opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)
opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)
hparam_dict.update(opt_flag_dict)
# Update with task flags
task_flag_dict = utils_impl.lookup_flag_values(task_flags)
hparam_dict.update(task_flag_dict)
training_utils.write_hparams_to_csv(hparam_dict, FLAGS.root_output_dir,
FLAGS.experiment_name)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
train_client_spec = tff.simulation.baselines.ClientSpec(
num_epochs=FLAGS.client_epochs_per_round,
batch_size=FLAGS.client_batch_size,
max_elements=FLAGS.max_elements_per_client)
task = task_utils.create_task_from_flags(train_client_spec)
logging.info('Trainable weights:')
for weight in task.model_fn().trainable_variables:
logging.info('name: %s shape: %s', weight.name, weight.shape)
if FLAGS.uniform_weighting:
client_weighting = tff.learning.ClientWeighting.UNIFORM
elif FLAGS.task == 'shakespeare_character' or FLAGS.task == 'stackoverflow_word':
def client_weighting(local_outputs):
return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)
else:
client_weighting = None
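    # None defers to build_federated_averaging_process's default weighting.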
if FLAGS.noise_multiplier is None:
if FLAGS.uniform_weighting:
aggregation_factory = tff.aggregators.UnweightedMeanFactory()
else:
aggregation_factory = tff.aggregators.MeanFactory()
if FLAGS.clip is not None:
if FLAGS.clip <= 0:
raise ValueError('clip must be positive if clipping is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
clip = FLAGS.clip
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
clip = tff.aggregators.PrivateQuantileEstimationProcess.no_noise(
initial_estimate=FLAGS.clip,
target_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
aggregation_factory = tff.aggregators.clipping_factory(
clip, aggregation_factory)
else:
if not FLAGS.uniform_weighting:
raise ValueError(
'Differential privacy is only implemented for uniform weighting.')
if FLAGS.noise_multiplier <= 0:
raise ValueError('noise_multiplier must be positive if DP is enabled.')
if FLAGS.clip is None or FLAGS.clip <= 0:
raise ValueError('clip must be positive if DP is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
clip=FLAGS.clip)
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
initial_l2_norm_clip=FLAGS.clip,
target_unclipped_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
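  # Illustrative flag combinations for the branches above (values are made
  # up, not defaults): non-DP with clipping -> --clip=1.0; DP with a fixed
  # clip -> --uniform_weighting --noise_multiplier=0.5 --clip=1.0; DP with
  # adaptive clipping additionally sets --adaptive_clip_learning_rate=0.2.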
iterative_process = tff.learning.build_federated_averaging_process(
model_fn=task.model_fn,
server_optimizer_fn=server_optimizer_fn,
client_weighting=client_weighting,
client_optimizer_fn=client_optimizer_fn,
model_update_aggregation_factory=aggregation_factory)
train_data = task.datasets.train_data.preprocess(
task.datasets.train_preprocess_fn)
training_process = (
tff.simulation.compose_dataset_computation_with_iterative_process(
train_data.dataset_computation, iterative_process))
training_selection_fn = functools.partial(
tff.simulation.build_uniform_sampling_fn(
train_data.client_ids, random_seed=FLAGS.client_datasets_random_seed),
size=FLAGS.clients_per_round)
test_data = task.datasets.get_centralized_test_data()
validation_data = test_data.take(FLAGS.num_validation_examples)
federated_eval = tff.learning.build_federated_evaluation(task.model_fn)
evaluation_selection_fn = lambda round_num: [validation_data]
def evaluation_fn(state, evaluation_data):
return federated_eval(state.model, evaluation_data)
program_state_manager, metrics_managers = training_utils.create_managers(
FLAGS.root_output_dir, FLAGS.experiment_name)
_write_hparam_flags()
state = tff.simulation.run_training_process(
training_process=training_process,
training_selection_fn=training_selection_fn,
total_rounds=FLAGS.total_rounds,
evaluation_fn=evaluation_fn,
evaluation_selection_fn=evaluation_selection_fn,
rounds_per_evaluation=FLAGS.rounds_per_eval,
program_state_manager=program_state_manager,
rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint,
metrics_managers=metrics_managers)
test_metrics = federated_eval(state.model, [test_data])
for metrics_manager in metrics_managers:
metrics_manager.release(test_metrics, FLAGS.total_rounds + 1)
if __name__ == '__main__':
app.run(main)
| [((29, 5, 29, 37), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ({}, {}), '()', False, 'from utils import utils_impl\n'), ((31, 2, 31, 50), 'utils.optimizers.optimizer_utils.define_optimizer_flags', 'optimizer_utils.define_optimizer_flags', ({(31, 41, 31, 49): '"""client"""'}, {}), "('client')", False, 'from utils.optimizers import optimizer_utils\n'), ((32, 2, 32, 50), 'utils.optimizers.optimizer_utils.define_optimizer_flags', 'optimizer_utils.define_optimizer_flags', ({(32, 41, 32, 49): '"""server"""'}, {}), "('server')", False, 'from utils.optimizers import optimizer_utils\n'), ((34, 5, 34, 37), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ({}, {}), '()', False, 'from utils import utils_impl\n'), ((36, 2, 37, 75), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(36, 23, 36, 48): '"""client_epochs_per_round"""', (36, 50, 36, 51): '(1)', (37, 23, 37, 74): '"""Number of epochs in the client to take per round."""'}, {}), "('client_epochs_per_round', 1,\n 'Number of epochs in the client to take per round.')", False, 'from absl import flags\n'), ((38, 2, 38, 77), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(38, 23, 38, 42): '"""client_batch_size"""', (38, 44, 38, 46): '(20)', (38, 48, 38, 76): '"""Batch size on the clients."""'}, {}), "('client_batch_size', 20, 'Batch size on the clients.')", False, 'from absl import flags\n'), ((39, 2, 40, 63), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(39, 23, 39, 42): '"""clients_per_round"""', (39, 44, 39, 46): '(10)', (40, 23, 40, 62): '"""How many clients to sample per round."""'}, {}), "('clients_per_round', 10,\n 'How many clients to sample per round.')", False, 'from absl import flags\n'), ((41, 2, 42, 58), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(41, 23, 41, 52): '"""client_datasets_random_seed"""', (41, 54, 41, 55): '(1)', (42, 23, 42, 57): '"""Random seed for client sampling."""'}, {}), "('client_datasets_random_seed', 1,\n 'Random seed for client sampling.')", False, 'from absl import flags\n'), ((43, 2, 46, 37), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(44, 6, 44, 31): '"""max_elements_per_client"""', (44, 33, 44, 37): 'None', (44, 39, 46, 36): '"""Maximum number of elements for each training client. If set to None, all available examples are used."""'}, {}), "('max_elements_per_client', None,\n 'Maximum number of elements for each training client. If set to None, all available examples are used.'\n )", False, 'from absl import flags\n'), ((49, 2, 49, 79), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(49, 23, 49, 37): '"""total_rounds"""', (49, 39, 49, 42): '(200)', (49, 44, 49, 78): '"""Number of total training rounds."""'}, {}), "('total_rounds', 200, 'Number of total training rounds.')", False, 'from absl import flags\n'), ((50, 2, 52, 58), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(51, 6, 51, 23): '"""experiment_name"""', (51, 25, 51, 29): 'None', (51, 31, 52, 57): '"""The name of this experiment. Will be append to --root_output_dir to separate experiment results."""'}, {}), "('experiment_name', None,\n 'The name of this experiment. 
Will be append to --root_output_dir to separate experiment results.'\n )", False, 'from absl import flags\n'), ((53, 2, 54, 70), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(53, 22, 53, 39): '"""root_output_dir"""', (53, 41, 53, 56): '"""/tmp/fed_opt/"""', (54, 22, 54, 69): '"""Root directory for writing experiment output."""'}, {}), "('root_output_dir', '/tmp/fed_opt/',\n 'Root directory for writing experiment output.')", False, 'from absl import flags\n'), ((55, 2, 57, 74), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(56, 6, 56, 23): '"""rounds_per_eval"""', (56, 25, 56, 26): '(1)', (57, 6, 57, 73): '"""How often to evaluate the global model on the validation dataset."""'}, {}), "('rounds_per_eval', 1,\n 'How often to evaluate the global model on the validation dataset.')", False, 'from absl import flags\n'), ((58, 2, 61, 18), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(59, 6, 59, 31): '"""num_validation_examples"""', (59, 33, 59, 35): '(-1)', (59, 37, 61, 17): '"""The number of validationexamples to use. If set to -1, all available examples are used."""'}, {}), "('num_validation_examples', -1,\n 'The number of validationexamples to use. If set to -1, all available examples are used.'\n )", False, 'from absl import flags\n'), ((62, 2, 63, 67), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(62, 23, 62, 46): '"""rounds_per_checkpoint"""', (62, 48, 62, 50): '(50)', (63, 23, 63, 66): '"""How often to checkpoint the global model."""'}, {}), "('rounds_per_checkpoint', 50,\n 'How often to checkpoint the global model.')", False, 'from absl import flags\n'), ((65, 5, 65, 37), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ({}, {}), '()', False, 'from utils import utils_impl\n'), ((67, 2, 69, 57), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', ({(68, 6, 68, 12): '"""clip"""', (68, 14, 68, 18): 'None', (68, 20, 69, 56): '"""Clip value for fixed clipping or initial clip for adaptive clipping. If None, no clipping is used."""'}, {}), "('clip', None,\n 'Clip value for fixed clipping or initial clip for adaptive clipping. If None, no clipping is used.'\n )", False, 'from absl import flags\n'), ((70, 2, 71, 77), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', ({(70, 21, 70, 39): '"""noise_multiplier"""', (70, 41, 70, 45): 'None', (71, 21, 71, 76): '"""Noise multiplier. If None, non-DP aggregator is used."""'}, {}), "('noise_multiplier', None,\n 'Noise multiplier. If None, non-DP aggregator is used.')", False, 'from absl import flags\n'), ((72, 2, 74, 43), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', ({(73, 6, 73, 35): '"""adaptive_clip_learning_rate"""', (73, 37, 73, 41): 'None', (73, 43, 74, 42): '"""Adaptive clip learning rate. If None, clip adaptation is not used."""'}, {}), "('adaptive_clip_learning_rate', None,\n 'Adaptive clip learning rate. 
If None, clip adaptation is not used.')", False, 'from absl import flags\n'), ((75, 2, 76, 50), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', ({(75, 21, 75, 48): '"""target_unclipped_quantile"""', (75, 50, 75, 53): '(0.5)', (76, 21, 76, 49): '"""Target unclipped quantile."""'}, {}), "('target_unclipped_quantile', 0.5,\n 'Target unclipped quantile.')", False, 'from absl import flags\n'), ((77, 2, 78, 61), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', ({(77, 23, 77, 42): '"""uniform_weighting"""', (77, 44, 77, 49): '(False)', (78, 23, 78, 60): '"""Whether to weigh clients uniformly."""'}, {}), "('uniform_weighting', False,\n 'Whether to weigh clients uniformly.')", False, 'from absl import flags\n'), ((81, 5, 81, 37), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ({}, {}), '()', False, 'from utils import utils_impl\n'), ((82, 2, 82, 32), 'utils.task_utils.define_task_flags', 'task_utils.define_task_flags', ({}, {}), '()', False, 'from utils import task_utils\n'), ((89, 16, 89, 59), 'utils.utils_impl.lookup_flag_values', 'utils_impl.lookup_flag_values', ({(89, 46, 89, 58): 'shared_flags'}, {}), '(shared_flags)', False, 'from utils import utils_impl\n'), ((92, 18, 92, 64), 'utils.utils_impl.lookup_flag_values', 'utils_impl.lookup_flag_values', ({(92, 48, 92, 63): 'optimizer_flags'}, {}), '(optimizer_flags)', False, 'from utils import utils_impl\n'), ((93, 18, 93, 78), 'utils.optimizers.optimizer_utils.remove_unused_flags', 'optimizer_utils.remove_unused_flags', ({(93, 54, 93, 62): '"""client"""', (93, 64, 93, 77): 'opt_flag_dict'}, {}), "('client', opt_flag_dict)", False, 'from utils.optimizers import optimizer_utils\n'), ((94, 18, 94, 78), 'utils.optimizers.optimizer_utils.remove_unused_flags', 'optimizer_utils.remove_unused_flags', ({(94, 54, 94, 62): '"""server"""', (94, 64, 94, 77): 'opt_flag_dict'}, {}), "('server', opt_flag_dict)", False, 'from utils.optimizers import optimizer_utils\n'), ((98, 19, 98, 60), 'utils.utils_impl.lookup_flag_values', 'utils_impl.lookup_flag_values', ({(98, 49, 98, 59): 'task_flags'}, {}), '(task_flags)', False, 'from utils import utils_impl\n'), ((100, 2, 101, 60), 'utils.training_utils.write_hparams_to_csv', 'training_utils.write_hparams_to_csv', ({(100, 38, 100, 49): 'hparam_dict', (100, 51, 100, 72): 'FLAGS.root_output_dir', (101, 38, 101, 59): 'FLAGS.experiment_name'}, {}), '(hparam_dict, FLAGS.root_output_dir,\n FLAGS.experiment_name)', False, 'from utils import training_utils\n'), ((109, 24, 109, 80), 'utils.optimizers.optimizer_utils.create_optimizer_fn_from_flags', 'optimizer_utils.create_optimizer_fn_from_flags', ({(109, 71, 109, 79): '"""client"""'}, {}), "('client')", False, 'from utils.optimizers import optimizer_utils\n'), ((110, 24, 110, 80), 'utils.optimizers.optimizer_utils.create_optimizer_fn_from_flags', 'optimizer_utils.create_optimizer_fn_from_flags', ({(110, 71, 110, 79): '"""server"""'}, {}), "('server')", False, 'from utils.optimizers import optimizer_utils\n'), ((112, 22, 115, 49), 'tensorflow_federated.simulation.baselines.ClientSpec', 'tff.simulation.baselines.ClientSpec', (), '', True, 'import tensorflow_federated as tff\n'), ((116, 9, 116, 61), 'utils.task_utils.create_task_from_flags', 'task_utils.create_task_from_flags', ({(116, 43, 116, 60): 'train_client_spec'}, {}), '(train_client_spec)', False, 'from utils import task_utils\n'), ((118, 2, 118, 36), 'absl.logging.info', 'logging.info', ({(118, 15, 118, 35): '"""Trainable weights:"""'}, {}), "('Trainable weights:')", False, 'from absl 
import logging\n'), ((175, 22, 180, 59), 'tensorflow_federated.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', (), '', True, 'import tensorflow_federated as tff\n'), ((184, 6, 185, 60), 'tensorflow_federated.simulation.compose_dataset_computation_with_iterative_process', 'tff.simulation.compose_dataset_computation_with_iterative_process', ({(185, 10, 185, 40): 'train_data.dataset_computation', (185, 42, 185, 59): 'iterative_process'}, {}), '(train_data\n .dataset_computation, iterative_process)', True, 'import tensorflow_federated as tff\n'), ((194, 19, 194, 73), 'tensorflow_federated.learning.build_federated_evaluation', 'tff.learning.build_federated_evaluation', ({(194, 59, 194, 72): 'task.model_fn'}, {}), '(task.model_fn)', True, 'import tensorflow_federated as tff\n'), ((200, 44, 201, 51), 'utils.training_utils.create_managers', 'training_utils.create_managers', ({(201, 6, 201, 27): 'FLAGS.root_output_dir', (201, 29, 201, 50): 'FLAGS.experiment_name'}, {}), '(FLAGS.root_output_dir, FLAGS.experiment_name)', False, 'from utils import training_utils\n'), ((203, 10, 212, 40), 'tensorflow_federated.simulation.run_training_process', 'tff.simulation.run_training_process', (), '', True, 'import tensorflow_federated as tff\n'), ((220, 2, 220, 15), 'absl.app.run', 'app.run', ({(220, 10, 220, 14): 'main'}, {}), '(main)', False, 'from absl import app\n'), ((120, 4, 120, 66), 'absl.logging.info', 'logging.info', ({(120, 17, 120, 38): '"""name: %s shape: %s"""', (120, 40, 120, 51): 'weight.name', (120, 53, 120, 65): 'weight.shape'}, {}), "('name: %s shape: %s', weight.name, weight.shape)", False, 'from absl import logging\n'), ((188, 6, 189, 79), 'tensorflow_federated.simulation.build_uniform_sampling_fn', 'tff.simulation.build_uniform_sampling_fn', (), '', True, 'import tensorflow_federated as tff\n'), ((133, 28, 133, 67), 'tensorflow_federated.aggregators.UnweightedMeanFactory', 'tff.aggregators.UnweightedMeanFactory', ({}, {}), '()', True, 'import tensorflow_federated as tff\n'), ((135, 28, 135, 57), 'tensorflow_federated.aggregators.MeanFactory', 'tff.aggregators.MeanFactory', ({}, {}), '()', True, 'import tensorflow_federated as tff\n'), ((149, 28, 150, 36), 'tensorflow_federated.aggregators.clipping_factory', 'tff.aggregators.clipping_factory', ({(150, 10, 150, 14): 'clip', (150, 16, 150, 35): 'aggregation_factory'}, {}), '(clip, aggregation_factory)', True, 'import tensorflow_federated as tff\n'), ((160, 28, 163, 26), 'tensorflow_federated.aggregators.DifferentiallyPrivateFactory.gaussian_fixed', 'tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed', (), '', True, 'import tensorflow_federated as tff\n'), ((168, 28, 173, 58), 'tensorflow_federated.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive', 'tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive', (), '', True, 'import tensorflow_federated as tff\n'), ((145, 15, 148, 60), 'tensorflow_federated.aggregators.PrivateQuantileEstimationProcess.no_noise', 'tff.aggregators.PrivateQuantileEstimationProcess.no_noise', (), '', True, 'import tensorflow_federated as tff\n'), ((127, 21, 127, 60), 'tensorflow.squeeze', 'tf.squeeze', ({(127, 32, 127, 59): "local_outputs['num_tokens']"}, {}), "(local_outputs['num_tokens'])", True, 'import tensorflow as tf\n')] |
anhvth/Pseudo_Lidar_V2 | waymo_kitti_converter/tools/visual_point_cloud.py | d7a29ffc811e315df25bba2a43acf288d4ceb30e | import open3d as o3d
import numpy as np
pc_load_pathname = '/home/caizhongang/github/waymo_kitti_converter/007283-000.bin'
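# The converter's .bin file is read as raw float32 values and reshaped to N x 3 (x, y, z).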
pc = np.fromfile(pc_load_pathname, dtype=np.float32).reshape(-1, 3)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0])
visual = [pcd, axis]
o3d.visualization.draw_geometries(visual)
| [((7, 6, 7, 31), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ({}, {}), '()', True, 'import open3d as o3d\n'), ((8, 13, 8, 43), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', ({(8, 40, 8, 42): 'pc'}, {}), '(pc)', True, 'import open3d as o3d\n'), ((9, 7, 9, 80), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', (), '', True, 'import open3d as o3d\n'), ((11, 0, 11, 41), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', ({(11, 34, 11, 40): 'visual'}, {}), '(visual)', True, 'import open3d as o3d\n'), ((5, 5, 5, 52), 'numpy.fromfile', 'np.fromfile', (), '', True, 'import numpy as np\n')] |
scottwedge/OpenStack-Stein | designate-8.0.0/designate/tests/test_api/test_v2/test_limits.py | 7077d1f602031dace92916f14e36b124f474de15 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from designate.tests.test_api.test_v2 import ApiV2TestCase
class ApiV2LimitsTest(ApiV2TestCase):
def test_get_limits(self):
response = self.client.get('/limits/')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('max_zones', response.json)
self.assertIn('max_zone_records', response.json)
self.assertIn('max_zone_recordsets',
response.json)
self.assertIn('max_recordset_records',
response.json)
self.assertIn('min_ttl', response.json)
self.assertIn('max_zone_name_length',
response.json)
self.assertIn('max_recordset_name_length',
response.json)
self.assertIn('max_page_limit',
response.json)
absolutelimits = response.json
self.assertEqual(cfg.CONF.quota_zones, absolutelimits['max_zones'])
self.assertEqual(cfg.CONF.quota_zone_records,
absolutelimits['max_zone_recordsets'])
self.assertEqual(cfg.CONF['service:central'].min_ttl,
absolutelimits['min_ttl'])
self.assertEqual(cfg.CONF['service:central'].max_zone_name_len,
absolutelimits['max_zone_name_length'])
self.assertEqual(cfg.CONF['service:central'].max_recordset_name_len,
absolutelimits['max_recordset_name_length'])
self.assertEqual(cfg.CONF['service:api'].max_limit_v2,
absolutelimits['max_page_limit'])
| [] |
alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/scenegraph/nodepath.py | 7f600ad153270feff12aa7aa86d7ed0a49ebc71c | """node-path implementation for OpenGLContext
"""
from vrml.vrml97 import nodepath, nodetypes
from vrml.cache import CACHE
from OpenGLContext import quaternion
from OpenGL.GL import glMultMatrixf
class _NodePath( object ):
"""OpenGLContext-specific node-path class
    At the moment this adds two methods: transform(), which
    traverses the path, calling transform() for each
    Transforming node that has a transform method, and
    quaternion(), which summarises the rotations along the path.
"""
__slots__ = ()
def transform( self, mode=None, translate=1, scale=1, rotate=1 ):
"""For each Transforming node, do OpenGL transform
Does _not_ push-pop matrices, so do that before
if you want to save your current matrix. This method
is useful primarily for storing paths to, for instance,
bindable nodes, where you want to be able to rapidly
transform down to the node, without needing a full
traversal of the scenegraph.
"""
matrix = self.transformMatrix(
translate=translate, scale=scale, rotate=rotate
)
glMultMatrixf(
matrix
)
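        # Caller-side sketch of the push/pop pattern described in the
        # docstring above (saving/restoring the matrix is the caller's job):
        #     glPushMatrix()
        #     try:
        #         path.transform()
        #         # ... render at the transformed location ...
        #     finally:
        #         glPopMatrix()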
def quaternion( self ):
"""Get summary quaternion for all rotations in stack"""
nodes = [
node
for node in self
if (
isinstance(node, nodetypes.Transforming) and
hasattr( node, "orientation")
)
]
q = quaternion.Quaternion()
for node in nodes:
q = q * quaternion.fromXYZR( *node.orientation )
return q
class NodePath( _NodePath, nodepath.NodePath ):
pass
class WeakNodePath( _NodePath, nodepath.WeakNodePath ):
pass
| [((30, 8, 32, 9), 'OpenGL.GL.glMultMatrixf', 'glMultMatrixf', ({(31, 12, 31, 18): 'matrix'}, {}), '(matrix)', False, 'from OpenGL.GL import glMultMatrixf\n'), ((43, 12, 43, 35), 'OpenGLContext.quaternion.Quaternion', 'quaternion.Quaternion', ({}, {}), '()', False, 'from OpenGLContext import quaternion\n'), ((45, 20, 45, 60), 'OpenGLContext.quaternion.fromXYZR', 'quaternion.fromXYZR', ({(45, 41, 45, 58): '*node.orientation'}, {}), '(*node.orientation)', False, 'from OpenGLContext import quaternion\n')] |
ApprenticeOne/python_learn | part01_basic/for_while_loop.py | 2433726b3f164526e8a8fa18739854e052d76a2e | import random
from math import sqrt
sum = 0
for x in range(101):
sum += x
print(sum)
'''
range(101): 0-100, 101 numbers in total
range(1,101): 1-100
range(1,101,2): odd numbers from 1 to 100, step 2
range(100,0,-2): even numbers from 100 down to 2, step -2
'''
sum = 0
for x in range(100, 0, -2):
sum += x
print(sum)
# while
# 0-100间的随机数
answer = random.randint(0, 100)
count = 0
while True:
count += 1
number = int(input("Please enter the number: "))
    if number < answer:
        print("guess larger")
    elif number > answer:
        print("guess smaller")
    else:
        print("right")
        break
print('you got the right answer in %d tries' % count)
for i in range(1, 10):
for j in range(1, i + 1):
print('%d*%d=%d' % (i, j, i * j), end='\t')
print()
# read a positive integer and check whether it is a prime number
num = int(input('Please enter a positive integer: '))
end = int(sqrt(num))
is_prime = True
# why stop at end: if the number has a factor smaller than sqrt(num),
# it must also have a matching factor larger than sqrt(num)
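# e.g. 36 = 2*18 = 3*12 = 4*9 = 6*6: each factor pair has one member <= sqrt(36) = 6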
for x in range(2, end + 1):
if num % x == 0:
is_prime = False
break
if is_prime and num != 1:
    print('%d is a prime number' % num)
else:
    print('%d is not a prime number' % num)
| [((25, 9, 25, 31), 'random.randint', 'random.randint', ({(25, 24, 25, 25): '0', (25, 27, 25, 30): '100'}, {}), '(0, 100)', False, 'import random\n'), ((47, 10, 47, 19), 'math.sqrt', 'sqrt', ({(47, 15, 47, 18): 'num'}, {}), '(num)', False, 'from math import sqrt\n')] |
ElementGenomicsInc/toil | src/toil/batchSystems/htcondor.py | e29a07db194469afba3edf90ffeee8f981f7344b | # Copyright (C) 2018, HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import str
import sys
import os
import logging
import time
import math
from toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem
import htcondor
import classad
logger = logging.getLogger(__name__)
class HTCondorBatchSystem(AbstractGridEngineBatchSystem):
# When using HTCondor, the Schedd handles scheduling
class Worker(AbstractGridEngineBatchSystem.Worker):
# Override the createJobs method so that we can use htcondor.Submit objects
# and so that we can get disk allocation requests and ceil the CPU request.
def createJobs(self, newJob):
activity = False
if newJob is not None:
self.waitingJobs.append(newJob)
# Queue jobs as necessary:
while len(self.waitingJobs) > 0:
activity = True
jobID, cpu, memory, disk, jobName, command = self.waitingJobs.pop(0)
# Prepare the htcondor.Submit object
submitObj = self.prepareSubmission(cpu, memory, disk, jobID, jobName, command)
logger.debug("Submitting %r", submitObj)
# Submit job and get batch system ID (i.e. the ClusterId)
batchJobID = self.submitJob(submitObj)
logger.debug("Submitted job %s", str(batchJobID))
# Store dict for mapping Toil job ID to batch job ID
# TODO: Note that this currently stores a tuple of (batch system
# ID, Task), but the second value is None by default and doesn't
# seem to be used
self.batchJobIDs[jobID] = (batchJobID, None)
# Add to queue of queued ("running") jobs
self.runningJobs.add(jobID)
# Add to allocated resources
self.allocatedCpus[jobID] = int(math.ceil(cpu))
return activity
def prepareSubmission(self, cpu, memory, disk, jobID, jobName, command):
# Convert resource requests
cpu = int(math.ceil(cpu)) # integer CPUs
memory = float(memory)/1024 # memory in KB
disk = float(disk)/1024 # disk in KB
# Workaround for HTCondor Python bindings Unicode conversion bug
command = command.encode('utf-8')
# Execute the entire command as /bin/sh -c "command"
# TODO: Transfer the jobStore directory if using a local file store with a relative path.
submit_parameters = {
'executable': '/bin/sh',
'transfer_executable': 'False',
'arguments': '''"-c '{0}'"'''.format(command),
'environment': self.getEnvString(),
'request_cpus': '{0}'.format(cpu),
'request_memory': '{0:.3f}KB'.format(memory),
'request_disk': '{0:.3f}KB'.format(disk),
'leave_in_queue': '(JobStatus == 4)',
'+IsToilJob': 'True',
'+ToilJobID': '{0}'.format(jobID),
'+ToilJobName': '"{0}"'.format(jobName),
'+ToilJobKilled': 'False',
}
# Return the Submit object
return htcondor.Submit(submit_parameters)
def submitJob(self, submitObj):
# Queue the job using a Schedd transaction
schedd = self.connectSchedd()
with schedd.transaction() as txn:
batchJobID = submitObj.queue(txn)
# Return the ClusterId
return batchJobID
def getRunningJobIDs(self):
# Get all Toil jobs that are running
requirements = '(JobStatus == 2) && (IsToilJob)'
projection = ['ClusterId', 'ToilJobID', 'EnteredCurrentStatus']
schedd = self.connectSchedd()
ads = schedd.xquery(requirements = requirements,
projection = projection)
# Only consider the Toil jobs that are part of this workflow
batchJobIDs = [batchJobID for (batchJobID, task) in self.batchJobIDs.values()]
job_runtimes = {}
for ad in ads:
batchJobID = int(ad['ClusterId'])
jobID = int(ad['ToilJobID'])
if not (batchJobID in batchJobIDs):
continue
# HTCondor stores the start of the runtime as a Unix timestamp
runtime = time.time() - ad['EnteredCurrentStatus']
job_runtimes[jobID] = runtime
return job_runtimes
def killJob(self, jobID):
batchJobID = self.batchJobIDs[jobID][0]
logger.debug("Killing HTCondor job {0}".format(batchJobID))
# Set the job to be killed when its exit status is checked
schedd = self.connectSchedd()
job_spec = '(ClusterId == {0})'.format(batchJobID)
schedd.edit(job_spec, 'ToilJobKilled', 'True')
def getJobExitCode(self, batchJobID):
logger.debug("Getting exit code for HTCondor job {0}".format(batchJobID))
status = {
1: 'Idle',
2: 'Running',
3: 'Removed',
4: 'Completed',
5: 'Held',
6: 'Transferring Output',
7: 'Suspended'
}
requirements = '(ClusterId == {0})'.format(batchJobID)
projection = ['JobStatus', 'ToilJobKilled', 'ExitCode',
'HoldReason', 'HoldReasonSubCode']
schedd = self.connectSchedd()
ads = schedd.xquery(requirements = requirements, projection = projection)
# Make sure a ClassAd was returned
try:
ad = ads.next()
except StopIteration:
logger.error(
"No HTCondor ads returned using constraint: {0}".format(requirements))
raise
# Make sure only one ClassAd was returned
try:
ads.next()
except StopIteration:
pass
else:
logger.warning(
"Multiple HTCondor ads returned using constraint: {0}".format(requirements))
if ad['ToilJobKilled']:
logger.debug("HTCondor job {0} was killed by Toil".format(batchJobID))
# Remove the job from the Schedd and return 1
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return 1
elif status[ad['JobStatus']] == 'Completed':
logger.debug("HTCondor job {0} completed with exit code {1}".format(
batchJobID, ad['ExitCode']))
# Remove the job from the Schedd and return its exit code
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return int(ad['ExitCode'])
elif status[ad['JobStatus']] == 'Held':
logger.error("HTCondor job {0} was held: '{1} (sub code {2})'".format(
batchJobID, ad['HoldReason'], ad['HoldReasonSubCode']))
# Remove the job from the Schedd and return 1
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return 1
else: # Job still running or idle or doing something else
logger.debug("HTCondor job {0} has not completed (Status: {1})".format(
batchJobID, status[ad['JobStatus']]))
return None
"""
Implementation-specific helper methods
"""
def connectSchedd(self):
'''Connect to HTCondor Schedd and return a Schedd object'''
condor_host = os.getenv('TOIL_HTCONDOR_COLLECTOR')
schedd_name = os.getenv('TOIL_HTCONDOR_SCHEDD')
# If TOIL_HTCONDOR_ variables are set, use them to find the Schedd
if condor_host and schedd_name:
logger.debug(
"Connecting to HTCondor Schedd {0} using Collector at {1}".format(
schedd_name, condor_host))
try:
schedd_ad = htcondor.Collector(condor_host).locate(
htcondor.DaemonTypes.Schedd, schedd_name)
except IOError:
logger.error(
"Could not connect to HTCondor Collector at {0}".format(condor_host))
raise
except ValueError:
logger.error(
"Could not find HTCondor Schedd with name {0}".format(schedd_name))
raise
else:
schedd = htcondor.Schedd(schedd_ad)
# Otherwise assume the Schedd is on the local machine
else:
logger.debug("Connecting to HTCondor Schedd on local machine")
schedd = htcondor.Schedd()
# Ping the Schedd to make sure it's there and responding
try:
schedd.xquery(limit = 0)
except RuntimeError:
logger.error("Could not connect to HTCondor Schedd")
raise
return schedd
def getEnvString(self):
'''Build an environment string that a HTCondor Submit object can use.
For examples of valid strings, see:
http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html#man-condor-submit-environment
'''
env_items = []
if self.boss.environment:
for key, value in self.boss.environment.items():
# Each variable should be in the form of <key>='<value>'
env_string = key + "="
# The entire value should be encapsulated in single quotes
# Quote marks (single or double) that are part of the value should be duplicated
env_string += "'" + value.replace("'", "''").replace('"', '""') + "'"
env_items.append(env_string)
# The entire string should be encapsulated in double quotes
# Each variable should be separated by a single space
return '"' + ' '.join(env_items) + '"'
# Override the issueBatchJob method so HTCondor can be given the disk request
def issueBatchJob(self, jobNode):
# Avoid submitting internal jobs to the batch queue, handle locally
localID = self.handleLocalJob(jobNode)
if localID:
return localID
else:
self.checkResourceRequest(jobNode.memory, jobNode.cores, jobNode.disk)
jobID = self.getNextJobID()
self.currentJobs.add(jobID)
# Add the jobNode.disk and jobNode.jobName to the job tuple
self.newJobsQueue.put((jobID, jobNode.cores, jobNode.memory, jobNode.disk, jobNode.jobName, jobNode.command))
logger.debug("Issued the job command: %s with job id: %s ", jobNode.command, str(jobID))
return jobID
@classmethod
def obtainSystemConstants(cls):
# Since it's not always clear what the max cpus and max memory available
# in an HTCondor slot might be, use some reasonable constants for now.
# TODO: Use a htcondor.Collector().query() to determine reasonable values.
max_cpu = 4
max_mem = 4e9
return max_cpu, max_mem
| [((30, 9, 30, 36), 'logging.getLogger', 'logging.getLogger', ({(30, 27, 30, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((100, 19, 100, 53), 'htcondor.Submit', 'htcondor.Submit', ({(100, 35, 100, 52): 'submit_parameters'}, {}), '(submit_parameters)', False, 'import htcondor\n'), ((221, 26, 221, 62), 'os.getenv', 'os.getenv', ({(221, 36, 221, 61): '"""TOIL_HTCONDOR_COLLECTOR"""'}, {}), "('TOIL_HTCONDOR_COLLECTOR')", False, 'import os\n'), ((222, 26, 222, 59), 'os.getenv', 'os.getenv', ({(222, 36, 222, 58): '"""TOIL_HTCONDOR_SCHEDD"""'}, {}), "('TOIL_HTCONDOR_SCHEDD')", False, 'import os\n'), ((75, 22, 75, 36), 'math.ceil', 'math.ceil', ({(75, 32, 75, 35): 'cpu'}, {}), '(cpu)', False, 'import math\n'), ((246, 25, 246, 42), 'htcondor.Schedd', 'htcondor.Schedd', ({}, {}), '()', False, 'import htcondor\n'), ((295, 89, 295, 99), 'builtins.str', 'str', ({(295, 93, 295, 98): 'jobID'}, {}), '(jobID)', False, 'from builtins import str\n'), ((56, 49, 56, 64), 'builtins.str', 'str', ({(56, 53, 56, 63): 'batchJobID'}, {}), '(batchJobID)', False, 'from builtins import str\n'), ((68, 48, 68, 62), 'math.ceil', 'math.ceil', ({(68, 58, 68, 61): 'cpu'}, {}), '(cpu)', False, 'import math\n'), ((131, 26, 131, 37), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((241, 29, 241, 55), 'htcondor.Schedd', 'htcondor.Schedd', ({(241, 45, 241, 54): 'schedd_ad'}, {}), '(schedd_ad)', False, 'import htcondor\n'), ((230, 32, 230, 63), 'htcondor.Collector', 'htcondor.Collector', ({(230, 51, 230, 62): 'condor_host'}, {}), '(condor_host)', False, 'import htcondor\n')] |
alanlv/PaddleSpeech | paddlespeech/t2s/modules/tacotron2/decoder.py | 7413c9e48ac77fdece45e0b4ffe41f7746ef0583 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
"""Tacotron2 decoder related modules."""
import paddle
import paddle.nn.functional as F
import six
from paddle import nn
from paddlespeech.t2s.modules.tacotron2.attentions import AttForwardTA
class Prenet(nn.Layer):
"""Prenet module for decoder of Spectrogram prediction network.
    This is a module of Prenet in the decoder of Spectrogram prediction network,
    which is described in `Natural TTS
    Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The Prenet performs a nonlinear conversion
    of the inputs before they are fed to the auto-regressive LSTM,
    which helps to learn diagonal attentions.
    Notes
    ----------
    This module always applies dropout even in evaluation.
See the detail in `Natural TTS Synthesis by
Conditioning WaveNet on Mel Spectrogram Predictions`_.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5):
"""Initialize prenet module.
Parameters
----------
idim : int
Dimension of the inputs.
        n_layers : int, optional
            The number of prenet layers.
        n_units : int, optional
            The number of prenet units.
        dropout_rate : float, optional
            Dropout rate.
"""
super().__init__()
self.dropout_rate = dropout_rate
self.prenet = nn.LayerList()
for layer in six.moves.range(n_layers):
n_inputs = idim if layer == 0 else n_units
self.prenet.append(
nn.Sequential(nn.Linear(n_inputs, n_units), nn.ReLU()))
def forward(self, x):
"""Calculate forward propagation.
Parameters
----------
x : Tensor
Batch of input tensors (B, ..., idim).
Returns
----------
Tensor
            Batch of output tensors (B, ..., n_units).
"""
for i in six.moves.range(len(self.prenet)):
            # F.dropout introduces randomness; Tacotron2's prenet dropout must not be removed
            # (it is applied even at inference, see the class docstring)
x = F.dropout(self.prenet[i](x))
return x
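# A minimal shape sketch for Prenet (illustrative only; the 80-dim input is an
# assumed mel-spectrogram size, not something fixed by this file):
#
#     prenet = Prenet(idim=80)                 # defaults: 2 layers, 256 units
#     y = prenet(paddle.randn([2, 80]))        # y.shape == [2, 256]
#
# Because every prenet layer ends in Linear(..., n_units), the trailing
# dimension of the output is n_units rather than idim.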
class Postnet(nn.Layer):
"""Postnet module for Spectrogram prediction network.
    This is a module of Postnet in Spectrogram prediction network,
    which is described in `Natural TTS Synthesis by
    Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The Postnet refines the predicted
    Mel-filterbank of the decoder,
    which helps to compensate for the fine structure of the spectrogram.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
idim,
odim,
n_layers=5,
n_chans=512,
n_filts=5,
dropout_rate=0.5,
use_batch_norm=True, ):
"""Initialize postnet module.
Parameters
----------
idim : int
Dimension of the inputs.
odim : int
Dimension of the outputs.
n_layers : int, optional
The number of layers.
        n_filts : int, optional
            The filter size.
        n_chans : int, optional
            The number of filter channels.
        use_batch_norm : bool, optional
            Whether to use batch normalization.
        dropout_rate : float, optional
            Dropout rate.
"""
super().__init__()
self.postnet = nn.LayerList()
for layer in six.moves.range(n_layers - 1):
ichans = odim if layer == 0 else n_chans
ochans = odim if layer == n_layers - 1 else n_chans
if use_batch_norm:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
ochans,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.BatchNorm1D(ochans),
nn.Tanh(),
nn.Dropout(dropout_rate), ))
else:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
ochans,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.Tanh(),
nn.Dropout(dropout_rate), ))
ichans = n_chans if n_layers != 1 else odim
if use_batch_norm:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
odim,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.BatchNorm1D(odim),
nn.Dropout(dropout_rate), ))
else:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
odim,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.Dropout(dropout_rate), ))
def forward(self, xs):
"""Calculate forward propagation.
Parameters
----------
xs : Tensor
Batch of the sequences of padded input tensors (B, idim, Tmax).
Returns
----------
Tensor
Batch of padded output tensor. (B, odim, Tmax).
"""
for i in six.moves.range(len(self.postnet)):
xs = self.postnet[i](xs)
return xs
class ZoneOutCell(nn.Layer):
"""ZoneOut Cell module.
This is a module of zoneout described in
`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`_.
This code is modified from `eladhoffer/seq2seq.pytorch`_.
Examples
----------
>>> lstm = paddle.nn.LSTMCell(16, 32)
>>> lstm = ZoneOutCell(lstm, 0.5)
.. _`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`:
https://arxiv.org/abs/1606.01305
.. _`eladhoffer/seq2seq.pytorch`:
https://github.com/eladhoffer/seq2seq.pytorch
"""
def __init__(self, cell, zoneout_rate=0.1):
"""Initialize zone out cell module.
Parameters
----------
        cell : nn.Layer
Paddle recurrent cell module
e.g. `paddle.nn.LSTMCell`.
zoneout_rate : float, optional
Probability of zoneout from 0.0 to 1.0.
"""
super().__init__()
self.cell = cell
self.hidden_size = cell.hidden_size
self.zoneout_rate = zoneout_rate
if zoneout_rate > 1.0 or zoneout_rate < 0.0:
raise ValueError(
"zoneout probability must be in the range from 0.0 to 1.0.")
def forward(self, inputs, hidden):
"""Calculate forward propagation.
Parameters
----------
inputs : Tensor
Batch of input tensor (B, input_size).
hidden : tuple
- Tensor: Batch of initial hidden states (B, hidden_size).
- Tensor: Batch of initial cell states (B, hidden_size).
Returns
----------
Tensor
Batch of next hidden states (B, hidden_size).
tuple:
- Tensor: Batch of next hidden states (B, hidden_size).
- Tensor: Batch of next cell states (B, hidden_size).
"""
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.cell(inputs, hidden)
next_hidden = self._zoneout(hidden, next_hidden, self.zoneout_rate)
# to have the same output format with LSTMCell in paddle
return next_hidden[0], next_hidden
def _zoneout(self, h, next_h, prob):
# apply recursively
if isinstance(h, tuple):
num_h = len(h)
if not isinstance(prob, tuple):
prob = tuple([prob] * num_h)
return tuple(
[self._zoneout(h[i], next_h[i], prob[i]) for i in range(num_h)])
if self.training:
mask = paddle.bernoulli(paddle.ones([*paddle.shape(h)]) * prob)
return mask * h + (1 - mask) * next_h
else:
return prob * h + (1 - prob) * next_h
class Decoder(nn.Layer):
"""Decoder module of Spectrogram prediction network.
This is a module of decoder of Spectrogram prediction network in Tacotron2,
    which is described in `Natural TTS
Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
The decoder generates the sequence of
features from the sequence of the hidden states.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
idim,
odim,
att,
dlayers=2,
dunits=1024,
prenet_layers=2,
prenet_units=256,
postnet_layers=5,
postnet_chans=512,
postnet_filts=5,
output_activation_fn=None,
cumulate_att_w=True,
use_batch_norm=True,
use_concate=True,
dropout_rate=0.5,
zoneout_rate=0.1,
reduction_factor=1, ):
"""Initialize Tacotron2 decoder module.
Parameters
----------
idim : int
Dimension of the inputs.
odim : int
Dimension of the outputs.
        att : nn.Layer
            Instance of attention class.
        dlayers : int, optional
The number of decoder lstm layers.
dunits : int, optional
The number of decoder lstm units.
prenet_layers : int, optional
The number of prenet layers.
prenet_units : int, optional
The number of prenet units.
postnet_layers : int, optional
The number of postnet layers.
postnet_filts : int, optional
            The postnet filter size.
postnet_chans : int, optional
The number of postnet filter channels.
output_activation_fn : nn.Layer, optional
Activation function for outputs.
cumulate_att_w : bool, optional
Whether to cumulate previous attention weight.
use_batch_norm : bool, optional
Whether to use batch normalization.
use_concate : bool, optional
Whether to concatenate encoder embedding with decoder lstm outputs.
dropout_rate : float, optional
Dropout rate.
zoneout_rate : float, optional
Zoneout rate.
reduction_factor : int, optional
Reduction factor.
"""
super().__init__()
# store the hyperparameters
self.idim = idim
self.odim = odim
self.att = att
self.output_activation_fn = output_activation_fn
self.cumulate_att_w = cumulate_att_w
self.use_concate = use_concate
self.reduction_factor = reduction_factor
# check attention type
if isinstance(self.att, AttForwardTA):
self.use_att_extra_inputs = True
else:
self.use_att_extra_inputs = False
# define lstm network
prenet_units = prenet_units if prenet_layers != 0 else odim
self.lstm = nn.LayerList()
for layer in six.moves.range(dlayers):
iunits = idim + prenet_units if layer == 0 else dunits
lstm = nn.LSTMCell(iunits, dunits)
if zoneout_rate > 0.0:
lstm = ZoneOutCell(lstm, zoneout_rate)
self.lstm.append(lstm)
# define prenet
if prenet_layers > 0:
self.prenet = Prenet(
idim=odim,
n_layers=prenet_layers,
n_units=prenet_units,
dropout_rate=dropout_rate, )
else:
self.prenet = None
# define postnet
if postnet_layers > 0:
self.postnet = Postnet(
idim=idim,
odim=odim,
n_layers=postnet_layers,
n_chans=postnet_chans,
n_filts=postnet_filts,
use_batch_norm=use_batch_norm,
dropout_rate=dropout_rate, )
else:
self.postnet = None
# define projection layers
iunits = idim + dunits if use_concate else dunits
self.feat_out = nn.Linear(
iunits, odim * reduction_factor, bias_attr=False)
self.prob_out = nn.Linear(iunits, reduction_factor)
# initialize
# self.apply(decoder_init)
def _zero_state(self, hs):
init_hs = paddle.zeros([paddle.shape(hs)[0], self.lstm[0].hidden_size])
return init_hs
def forward(self, hs, hlens, ys):
"""Calculate forward propagation.
Parameters
----------
hs : Tensor
Batch of the sequences of padded hidden states (B, Tmax, idim).
        hlens : Tensor(int64)
Batch of lengths of each input batch (B,).
ys : Tensor
Batch of the sequences of padded target features (B, Lmax, odim).
Returns
----------
Tensor
Batch of output tensors after postnet (B, Lmax, odim).
Tensor
Batch of output tensors before postnet (B, Lmax, odim).
Tensor
Batch of logits of stop prediction (B, Lmax).
Tensor
Batch of attention weights (B, Lmax, Tmax).
Note
----------
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
# length list should be list of int
# hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([paddle.shape(hs)[0], self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
outs, logits, att_ws = [], [], []
for y in ys.transpose([1, 0, 2]):
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w,
prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
z_list[i], c_list[i] = next_hidden
zcs = (paddle.concat([z_list[-1], att_c], axis=1)
if self.use_concate else z_list[-1])
outs += [
self.feat_out(zcs).reshape([paddle.shape(hs)[0], self.odim, -1])
]
logits += [self.prob_out(zcs)]
att_ws += [att_w]
# teacher forcing
prev_out = y
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
# (B, Lmax)
logits = paddle.concat(logits, axis=1)
# (B, odim, Lmax)
before_outs = paddle.concat(outs, axis=2)
# (B, Lmax, Tmax)
att_ws = paddle.stack(att_ws, axis=1)
if self.reduction_factor > 1:
# (B, odim, Lmax)
before_outs = before_outs.reshape(
[paddle.shape(before_outs)[0], self.odim, -1])
if self.postnet is not None:
# (B, odim, Lmax)
after_outs = before_outs + self.postnet(before_outs)
else:
after_outs = before_outs
# (B, Lmax, odim)
before_outs = before_outs.transpose([0, 2, 1])
# (B, Lmax, odim)
after_outs = after_outs.transpose([0, 2, 1])
logits = logits
# apply activation function for scaling
if self.output_activation_fn is not None:
before_outs = self.output_activation_fn(before_outs)
after_outs = self.output_activation_fn(after_outs)
return after_outs, before_outs, logits, att_ws
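    # A small worked example of the reduction-factor bookkeeping in forward()
    # (the numbers are illustrative only): with reduction_factor r = 2 and a
    # target of Lmax = 100 frames, ys is thinned to 50 decoder steps, feat_out
    # emits odim * 2 values per step, and the reshape of before_outs recovers a
    # (B, odim, 100) tensor before the postnet is applied.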
def inference(
self,
h,
threshold=0.5,
minlenratio=0.0,
maxlenratio=10.0,
use_att_constraint=False,
backward_window=None,
forward_window=None, ):
"""Generate the sequence of features given the sequences of characters.
Parameters
----------
h : Tensor
Input sequence of encoder hidden states (T, C).
threshold : float, optional
Threshold to stop generation.
minlenratio : float, optional
Minimum length ratio.
If set to 1.0 and the length of input is 10,
the minimum length of outputs will be 10 * 1 = 10.
        maxlenratio : float, optional
            Maximum length ratio.
If set to 10 and the length of input is 10,
the maximum length of outputs will be 10 * 10 = 100.
use_att_constraint : bool
Whether to apply attention constraint introduced in `Deep Voice 3`_.
backward_window : int
Backward window size in attention constraint.
forward_window : int
Forward window size in attention constraint.
Returns
----------
Tensor
Output sequence of features (L, odim).
Tensor
Output sequence of stop probabilities (L,).
Tensor
Attention weights (L, T).
Note
----------
This computation is performed in auto-regressive manner.
.. _`Deep Voice 3`: https://arxiv.org/abs/1710.07654
"""
# setup
assert len(paddle.shape(h)) == 2
hs = h.unsqueeze(0)
ilens = paddle.shape(h)[0]
maxlen = int(paddle.shape(h)[0] * maxlenratio)
minlen = int(paddle.shape(h)[0] * minlenratio)
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([1, self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# setup for attention constraint
if use_att_constraint:
last_attended_idx = 0
else:
last_attended_idx = None
# loop for an output sequence
idx = 0
outs, att_ws, probs = [], [], []
while True:
# updated index
idx += self.reduction_factor
# decoder calculation
if self.use_att_extra_inputs:
att_c, att_w = self.att(
hs,
ilens,
z_list[0],
prev_att_w,
prev_out,
last_attended_idx=last_attended_idx,
backward_window=backward_window,
forward_window=forward_window, )
else:
att_c, att_w = self.att(
hs,
ilens,
z_list[0],
prev_att_w,
last_attended_idx=last_attended_idx,
backward_window=backward_window,
forward_window=forward_window, )
att_ws += [att_w]
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
z_list[i], c_list[i] = next_hidden
zcs = (paddle.concat([z_list[-1], att_c], axis=1)
if self.use_concate else z_list[-1])
# [(1, odim, r), ...]
outs += [self.feat_out(zcs).reshape([1, self.odim, -1])]
# [(r), ...]
probs += [F.sigmoid(self.prob_out(zcs))[0]]
if self.output_activation_fn is not None:
prev_out = self.output_activation_fn(
outs[-1][:, :, -1]) # (1, odim)
else:
prev_out = outs[-1][:, :, -1] # (1, odim)
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
if use_att_constraint:
last_attended_idx = int(att_w.argmax())
# check whether to finish generation
if sum(paddle.cast(probs[-1] >= threshold,
'int64')) > 0 or idx >= maxlen:
# check mininum length
if idx < minlen:
continue
# (1, odim, L)
outs = paddle.concat(outs, axis=2)
if self.postnet is not None:
# (1, odim, L)
outs = outs + self.postnet(outs)
# (L, odim)
outs = outs.transpose([0, 2, 1]).squeeze(0)
probs = paddle.concat(probs, axis=0)
att_ws = paddle.concat(att_ws, axis=0)
break
if self.output_activation_fn is not None:
outs = self.output_activation_fn(outs)
return outs, probs, att_ws
def calculate_all_attentions(self, hs, hlens, ys):
"""Calculate all of the attention weights.
Parameters
----------
hs : Tensor
Batch of the sequences of padded hidden states (B, Tmax, idim).
hlens : Tensor(int64)
Batch of lengths of each input batch (B,).
ys : Tensor
Batch of the sequences of padded target features (B, Lmax, odim).
Returns
----------
numpy.ndarray
Batch of attention weights (B, Lmax, Tmax).
Note
----------
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
# length list should be list of int
hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([paddle.shape(hs)[0], self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
att_ws = []
for y in ys.transpose([1, 0, 2]):
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w,
prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
att_ws += [att_w]
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
z_list[i], c_list[i] = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
# teacher forcing
prev_out = y
if self.cumulate_att_w and prev_att_w is not None:
# Note: error when use +=
prev_att_w = prev_att_w + att_w
else:
prev_att_w = att_w
# (B, Lmax, Tmax)
att_ws = paddle.stack(att_ws, axis=1)
return att_ws
| [((61, 22, 61, 36), 'paddle.nn.LayerList', 'nn.LayerList', ({}, {}), '()', False, 'from paddle import nn\n'), ((62, 21, 62, 46), 'six.moves.range', 'six.moves.range', ({(62, 37, 62, 45): 'n_layers'}, {}), '(n_layers)', False, 'import six\n'), ((131, 23, 131, 37), 'paddle.nn.LayerList', 'nn.LayerList', ({}, {}), '()', False, 'from paddle import nn\n'), ((132, 21, 132, 50), 'six.moves.range', 'six.moves.range', ({(132, 37, 132, 49): '(n_layers - 1)'}, {}), '(n_layers - 1)', False, 'import six\n'), ((362, 20, 362, 34), 'paddle.nn.LayerList', 'nn.LayerList', ({}, {}), '()', False, 'from paddle import nn\n'), ((363, 21, 363, 45), 'six.moves.range', 'six.moves.range', ({(363, 37, 363, 44): 'dlayers'}, {}), '(dlayers)', False, 'import six\n'), ((395, 24, 396, 61), 'paddle.nn.Linear', 'nn.Linear', (), '', False, 'from paddle import nn\n'), ((397, 24, 397, 59), 'paddle.nn.Linear', 'nn.Linear', ({(397, 34, 397, 40): 'iunits', (397, 42, 397, 58): 'reduction_factor'}, {}), '(iunits, reduction_factor)', False, 'from paddle import nn\n'), ((482, 17, 482, 46), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((484, 22, 484, 49), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((486, 17, 486, 45), 'paddle.stack', 'paddle.stack', (), '', False, 'import paddle\n'), ((567, 19, 567, 47), 'paddle.zeros', 'paddle.zeros', ({(567, 32, 567, 46): '[1, self.odim]'}, {}), '([1, self.odim])', False, 'import paddle\n'), ((723, 17, 723, 45), 'paddle.stack', 'paddle.stack', (), '', False, 'import paddle\n'), ((365, 19, 365, 46), 'paddle.nn.LSTMCell', 'nn.LSTMCell', ({(365, 31, 365, 37): 'iunits', (365, 39, 365, 45): 'dunits'}, {}), '(iunits, dunits)', False, 'from paddle import nn\n'), ((459, 17, 459, 59), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((557, 16, 557, 31), 'paddle.shape', 'paddle.shape', ({(557, 29, 557, 30): 'h'}, {}), '(h)', False, 'import paddle\n'), ((610, 17, 610, 59), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((708, 17, 708, 59), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((468, 19, 468, 61), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((555, 19, 555, 34), 'paddle.shape', 'paddle.shape', ({(555, 32, 555, 33): 'h'}, {}), '(h)', False, 'import paddle\n'), ((619, 19, 619, 61), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((645, 23, 645, 50), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((651, 24, 651, 52), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((652, 25, 652, 54), 'paddle.concat', 'paddle.concat', (), '', False, 'import paddle\n'), ((65, 30, 65, 58), 'paddle.nn.Linear', 'nn.Linear', ({(65, 40, 65, 48): 'n_inputs', (65, 50, 65, 57): 'n_units'}, {}), '(n_inputs, n_units)', False, 'from paddle import nn\n'), ((65, 60, 65, 69), 'paddle.nn.ReLU', 'nn.ReLU', ({}, {}), '()', False, 'from paddle import nn\n'), ((164, 20, 170, 42), 'paddle.nn.Conv1D', 'nn.Conv1D', (), '', False, 'from paddle import nn\n'), ((171, 20, 171, 40), 'paddle.nn.BatchNorm1D', 'nn.BatchNorm1D', ({(171, 35, 171, 39): 'odim'}, {}), '(odim)', False, 'from paddle import nn\n'), ((172, 20, 172, 44), 'paddle.nn.Dropout', 'nn.Dropout', ({(172, 31, 172, 43): 'dropout_rate'}, {}), '(dropout_rate)', False, 'from paddle import nn\n'), ((176, 20, 182, 42), 'paddle.nn.Conv1D', 'nn.Conv1D', (), '', False, 'from paddle import nn\n'), ((183, 20, 183, 44), 'paddle.nn.Dropout', 'nn.Dropout', ({(183, 31, 183, 43): 'dropout_rate'}, {}), 
'(dropout_rate)', False, 'from paddle import nn\n'), ((403, 32, 403, 48), 'paddle.shape', 'paddle.shape', ({(403, 45, 403, 47): 'hs'}, {}), '(hs)', False, 'import paddle\n'), ((443, 33, 443, 49), 'paddle.shape', 'paddle.shape', ({(443, 46, 443, 48): 'hs'}, {}), '(hs)', False, 'import paddle\n'), ((558, 21, 558, 36), 'paddle.shape', 'paddle.shape', ({(558, 34, 558, 35): 'h'}, {}), '(h)', False, 'import paddle\n'), ((559, 21, 559, 36), 'paddle.shape', 'paddle.shape', ({(559, 34, 559, 35): 'h'}, {}), '(h)', False, 'import paddle\n'), ((691, 33, 691, 49), 'paddle.shape', 'paddle.shape', ({(691, 46, 691, 48): 'hs'}, {}), '(hs)', False, 'import paddle\n'), ((138, 24, 144, 46), 'paddle.nn.Conv1D', 'nn.Conv1D', (), '', False, 'from paddle import nn\n'), ((145, 24, 145, 46), 'paddle.nn.BatchNorm1D', 'nn.BatchNorm1D', ({(145, 39, 145, 45): 'ochans'}, {}), '(ochans)', False, 'from paddle import nn\n'), ((146, 24, 146, 33), 'paddle.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from paddle import nn\n'), ((147, 24, 147, 48), 'paddle.nn.Dropout', 'nn.Dropout', ({(147, 35, 147, 47): 'dropout_rate'}, {}), '(dropout_rate)', False, 'from paddle import nn\n'), ((151, 24, 157, 46), 'paddle.nn.Conv1D', 'nn.Conv1D', (), '', False, 'from paddle import nn\n'), ((158, 24, 158, 33), 'paddle.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from paddle import nn\n'), ((159, 24, 159, 48), 'paddle.nn.Dropout', 'nn.Dropout', ({(159, 35, 159, 47): 'dropout_rate'}, {}), '(dropout_rate)', False, 'from paddle import nn\n'), ((491, 17, 491, 42), 'paddle.shape', 'paddle.shape', ({(491, 30, 491, 41): 'before_outs'}, {}), '(before_outs)', False, 'import paddle\n'), ((639, 19, 640, 39), 'paddle.cast', 'paddle.cast', ({(639, 31, 639, 53): '(probs[-1] >= threshold)', (640, 31, 640, 38): '"""int64"""'}, {}), "(probs[-1] >= threshold, 'int64')", False, 'import paddle\n'), ((471, 44, 471, 60), 'paddle.shape', 'paddle.shape', ({(471, 57, 471, 59): 'hs'}, {}), '(hs)', False, 'import paddle\n'), ((269, 50, 269, 65), 'paddle.shape', 'paddle.shape', ({(269, 63, 269, 64): 'h'}, {}), '(h)', False, 'import paddle\n')] |
Mattlk13/pyBlazing | pyblazing/__init__.py | 5c3042c510ab17e9f9d1647e1873d3d04313d900 | from .api import run_query_get_token
from .api import convert_to_dask
from .api import run_query_get_results
from .api import run_query_get_concat_results
from .api import register_file_system
from .api import deregister_file_system
from .api import FileSystemType, DriverType, EncryptionType
from .api import SchemaFrom
from .api import create_table
from .api import ResultSetHandle
from .api import _get_client
from .api import gdf_dtype
from .api import get_dtype_values
from .api import get_np_dtype_to_gdf_dtype
from .api import SetupOrchestratorConnection
from .apiv2.context import make_default_orc_arg
from .apiv2.context import make_default_csv_arg
| [] |
apurwaj2/df-on-k8s | bootstrap/p1.5.0/src/common/const.py | 1aecb0bc293d008c5a2384df32ad434bfcc51caa |
class Constants(object):
LOGGER_CONF = "common/mapr_conf/logger.yml"
USERNAME = "mapr"
GROUPNAME = "mapr"
USERID = 5000
GROUPID = 5000
ADMIN_USERNAME = "custadmin"
ADMIN_GROUPNAME = "custadmin"
ADMIN_USERID = 7000
ADMIN_GROUPID = 7000
ADMIN_PASS = "mapr"
MYSQL_USER = "admin"
MYSQL_PASS = "mapr"
LDAPADMIN_USER = "admin"
LDAPADMIN_PASS = "mapr"
LDAPBIND_USER = "readonly"
LDAPBIND_PASS = "mapr"
EXAMPLE_LDAP_NAMESPACE = "hpe-ldap"
CSI_REPO = "quay.io/k8scsi"
KDF_REPO = "docker.io/maprtech" #registry.hub.docker.com/maprtech
KUBEFLOW_REPO = "gcr.io/mapr-252711/kf-ecp-5.3.0"
OPERATOR_REPO = "gcr.io/mapr-252711"
KUBELET_DIR = "/var/lib/kubelet"
ECP_KUBELET_DIR = "/var/lib/docker/kubelet"
LOCAL_PATH_PROVISIONER_REPO= ""
KFCTL_HSP_ISTIO_REPO = ""
BUSYBOX_REPO = ""
def enum(**named_values):
return type('Enum', (), named_values)
AUTH_TYPES = enum(CUSTOM_LDAP='customLDAP', RAW_LINUX_USERS='rawLinuxUsers', EXAMPLE_LDAP='exampleLDAP')
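    # The enum() helper above just builds a class whose attributes are the given
    # values, e.g. Constants.AUTH_TYPES.CUSTOM_LDAP == 'customLDAP' (illustration only).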
# OPEN SSL
OPENSSL = '/usr/bin/openssl'
KEY_SIZE = 1024
DAYS = 3650
CA_CERT = 'ca.cert'
CA_KEY = 'ca.key'
# http://www.openssl.org/docs/apps/openssl.html#PASS_PHRASE_ARGUMENTS
X509_EXTRA_ARGS = ()
OPENSSL_CONFIG_TEMPLATE = """
prompt = no
distinguished_name = req_distinguished_name
req_extensions = v3_req
[ req_distinguished_name ]
C = US
ST = CO
L = Fort Collins
O = HPE
OU = HCP
CN = %(service)s
emailAddress = [email protected]
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = %(service)s
DNS.2 = %(service)s.%(namespace)s
DNS.3 = %(service)s.%(namespace)s.svc
"""
| [] |
jeonghyunkeem/PointGroup | dataset/scan2cad/s2c_collect_pgroup.py | fa90830259aeb37d2e0f203471552d2f43cbc60b | # Jeonghyun Kim, UVR KAIST @jeonghyunct.kaist.ac.kr
import os, sys
import json
import h5py
import numpy as np
import quaternion
import torch
from torch.utils.data import Dataset
BASE_DIR_1 = os.path.dirname(os.path.abspath(__file__)) # scan2cad
BASE_DIR = os.path.dirname(BASE_DIR_1) # dataset
ROOT_DIR = os.path.dirname(BASE_DIR) # PointGroup
DATA_DIR = os.path.dirname(ROOT_DIR) # /root/
DATA_DIR = os.path.join(DATA_DIR, 'Dataset') # /root/Dataset
DUMP_DIR = os.path.join(ROOT_DIR, 'data')
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
from s2c_map import CLASS_MAPPING, ID2NAME, CARED_CLASS_MASK
from s2c_config import Scan2CADDatasetConfig
import s2c_utils
sys.path.append(os.path.join(ROOT_DIR, 'models/retrieval/'))
DC = Scan2CADDatasetConfig()
MAX_NUM_POINT = 50000
MAX_NUM_OBJ = 64
INS_NUM_POINT = 2048
FEATURE_DIMENSION = 512
MAX_DATA_SIZE = 15000
CHUNK_SIZE = 1000
INF = 9999
NOT_CARED_ID = np.array([INF]) # wall, floor
# Thresholds
PADDING = 0.05
SCALE_THRASHOLD = 0.05
SEG_THRESHOLD = 1
REMAPPER = np.ones(35, dtype=np.int64) * (-1)
for i, x in enumerate(CARED_CLASS_MASK):
REMAPPER[x] = i
print(f'REMAPPER[{x:2d}] => {i:2d}')
SYM2CLASS = {"__SYM_NONE": 0, "__SYM_ROTATE_UP_2": 1, "__SYM_ROTATE_UP_4": 2, "__SYM_ROTATE_UP_INF": 3}
# functions ==============================================================================================
def from_q_to_6d(q):
q = np.quaternion(q[0], q[1], q[2], q[3])
mat = quaternion.as_rotation_matrix(q) # 3x3
rep6d = mat[:, 0:2].transpose().reshape(-1, 6) # 6
return rep6d
def nn_search(p, ps):
target = torch.from_numpy(ps.copy())
p = torch.from_numpy(p.copy())
p_diff = target - p
p_dist = torch.sum(p_diff**2, dim=-1)
dist, idx = torch.min(p_dist, dim=-1)
return dist.item(), idx.item()
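# Example for nn_search (illustrative values only): the nearest of three points
# to the query [0.9, 0, 0] is index 1, at squared distance ~0.01.
#
#     ps = np.array([[0., 0., 0.], [1., 0., 0.], [0., 2., 0.]])
#     dist, idx = nn_search(np.array([0.9, 0., 0.]), ps)   # idx == 1, dist ~= 0.01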
def make_M_from_tqs(t, q, s):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
M = T.dot(R).dot(S)
return M
def compose_mat4(t, q, s, center=None):
if not isinstance(q, np.quaternion):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
C = np.eye(4)
if center is not None:
C[0:3, 3] = center
M = T.dot(R).dot(S).dot(C)
return M
def decompose_mat4(M):
R = M[0:3, 0:3].copy()
sx = np.linalg.norm(R[0:3, 0])
sy = np.linalg.norm(R[0:3, 1])
sz = np.linalg.norm(R[0:3, 2])
s = np.array([sx, sy, sz])
R[:,0] /= sx
R[:,1] /= sy
R[:,2] /= sz
q = quaternion.from_rotation_matrix(R[0:3, 0:3])
t = M[0:3, 3]
return t, q, s
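# A quick round-trip sketch for the two helpers above (values are illustrative):
# composing a transform and decomposing it again recovers t, q and s, assuming
# positive scales and a proper rotation.
#
#     t = [1.0, 2.0, 3.0]
#     q = [1.0, 0.0, 0.0, 0.0]          # w, x, y, z -> identity rotation
#     s = [0.5, 0.5, 0.5]
#     M = make_M_from_tqs(t, q, s)
#     t2, q2, s2 = decompose_mat4(M)    # t2 ~ t, q2 ~ quaternion(1,0,0,0), s2 ~ s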
# ========================================================================================================
LOG_N = 100
def print_log(log):
print('-'*LOG_N+'\n'+log+' \n'+'-'*LOG_N)
class Scan2CADCollect(Dataset):
def __init__(self, split_set='train', distr_check=False):
self.data_path = os.path.join(DATA_DIR, 'Scan2CAD/export')
self.out_path = os.path.join(BASE_DIR_1, 'data4')
if not os.path.exists(self.out_path):
os.mkdir(self.out_path)
print("Create export directory: {}".format(self.out_path))
all_scan_names = list(set([os.path.basename(x)[0:12] \
for x in os.listdir(self.data_path) if x.startswith('scene')]))
self.scan_names = []
if split_set in ['all', 'train', 'val', 'test']:
split_filenames = os.path.join(BASE_DIR_1, 'meta_data',
'scan2cad_{}.txt'.format(split_set))
with open(split_filenames, 'r') as f:
self.scan_list = f.read().splitlines()
# remove unavailiable scans
num_scans = len(self.scan_list)
self.scan_list = [sname for sname in self.scan_list \
if sname in all_scan_names]
print_log('Dataset for {}: kept {} scans out of {}'.format(split_set, len(self.scan_list), num_scans))
num_scans = len(self.scan_list)
else:
print('illegal split name')
return
filename_json = BASE_DIR_1 + "/full_annotations.json"
assert filename_json
self.dataset = {}
cat_summary = dict.fromkeys(DC.ClassToName, 0)
cat_ids = []
with open(filename_json, 'r') as f:
data = json.load(f)
d = {}
i = -1
for idx, r in enumerate(data):
i_scan = r["id_scan"]
if i_scan not in self.scan_list:
continue
self.scan_names.append(i_scan)
i += 1
d[i] = {}
d[i]['id_scan'] = i_scan
d[i]['trs'] = r["trs"]
n_model = r["n_aligned_models"]
d[i]['n_total'] = n_model
d[i]['models'] = {}
for j in range(n_model):
d[i]['models'][j] = {}
d[i]['models'][j]['trs'] = r["aligned_models"][j]['trs']
d[i]['models'][j]['center'] = r["aligned_models"][j]['center']
d[i]['models'][j]['bbox'] = r["aligned_models"][j]['bbox']
d[i]['models'][j]['sym'] = SYM2CLASS[r["aligned_models"][j]['sym']]
d[i]['models'][j]['fname'] = r["aligned_models"][j]['id_cad']
cat_id = r["aligned_models"][j]['catid_cad']
cat_ids.append(cat_id)
d[i]['models'][j]['cat_id'] = cat_id
cat_class = DC.ShapenetIDtoClass(cat_id)
d[i]['models'][j]['sem_cls'] = cat_class
# category summary
cat_summary[cat_class]+=1
self.dataset = d
self.cat_ids = np.unique(cat_ids)
if distr_check:
for k, v in sorted(cat_summary.items(), key=lambda item:item[1], reverse=True):
print(f'{k:2d}: {DC.ClassToName[k]:12s} => {v:4d}')
def __len__(self):
return len(self.dataset)
def size_check(self, scale, id_scan, sem_cls):
check = False
if scale[0] < SCALE_THRASHOLD:
scale[0] = SCALE_THRASHOLD
check = True
if scale[1] < SCALE_THRASHOLD:
scale[1] = SCALE_THRASHOLD
check = True
if scale[2] < SCALE_THRASHOLD:
scale[2] = SCALE_THRASHOLD
check = True
return scale
def collect(self, N, dump=False):
""" Return dictionary of {verts(x,y,z): cad filename}
Note:
NK = a total number of instances in dataset
V = a number of vertices
args:
N: int
a size of dataset
return:
dict: (NK, 1, V, 3)
a dictionary for verts-cad_file pairs
"""
# ======= GLOBAL LABEL VARIABLES =======
error_scan = {} # Text
# Anchor collection (for detection)
print_log(" LOADING SCENES")
collect_path = os.path.join(BASE_DIR, 'collect')
for index in range(N):
data = self.dataset[index]
id_scan = data['id_scan']
K = data['n_total']
assert(K <= MAX_NUM_OBJ)
# Point Cloud
mesh_vertices = np.load(os.path.join(self.data_path, id_scan) + '_vert.npy') # (N, 3)
semantic_labels = np.load(os.path.join(self.data_path, id_scan) + '_sem_label.npy') # (N, sem_cls(0, 1~35, 36~MAX, INF))
point_cloud = mesh_vertices[:,0:3]
colors = mesh_vertices[:,3:6] / 127.5 - 1
instance_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
semantic_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
# Sorting points cropping order to avoid overlapping
sort_by_scale = {}
for model in range(K):
obj_scale = np.array(data['models'][model]['trs']['scale'])
sort_by_scale[model] = np.sum(obj_scale)
model_scale_order = {model: scale for model, scale in sorted(sort_by_scale.items(), key=(lambda item:item[1]), reverse=True)}
K = len(model_scale_order.keys())
# Iterate on scale_order
checked = False
k = -1
for i, model in enumerate(model_scale_order.keys()):
k += 1
# semantics ()
sem_cls = data['models'][model]['sem_cls'] # (0~num_classes-1)
# Transform
obj_center = np.array(data['models'][model]['center'])
obj_translation = np.array(data['models'][model]['trs']['translation'])
obj_rotation = np.array(data['models'][model]['trs']['rotation'])
obj_scale = np.array(data['models'][model]['trs']['scale'])
obj_scale = self.size_check(obj_scale, id_scan, sem_cls)
Mobj = compose_mat4(obj_translation, obj_rotation, obj_scale, obj_center)
# Instance vertices
# - (1) Region Crop & Axis-aligned Bounding Box
vert_choices = np.array([])
ins_bbox = np.array(data['models'][model]['bbox'])
obj_corners = s2c_utils.get_3d_box_rotated(ins_bbox, Mobj, padding=PADDING)
ex_points, obj_vert_ind = s2c_utils.extract_pc_in_box3d(point_cloud, obj_corners)
nx = ex_points.shape[0]
# - (2) Instance Segments Crop
seg_points, vert_choices = \
s2c_utils.filter_dominant_cls(point_cloud, obj_vert_ind, semantic_labels, sem_cls+1, NOT_CARED_ID)
seg_nx = seg_points.shape[0]
# ======= Semantic/Instance vertices =======
if seg_nx < SEG_THRESHOLD:
k -= 1
checked = True
continue
sem_cls = REMAPPER[sem_cls]
# if sem_cls < 0: continue # ignore non-valid class object (only preserve CARED classes)
instance_vertices[vert_choices] = k # (0~K-1) NOTE:unannotated=-1
semantic_vertices[vert_choices] = sem_cls # (0~num_classes-1) NOTE:unannotated=-1
# error check
ins_list = np.unique(instance_vertices)
if (np.max(instance_vertices)+1) != (len(ins_list)-1):
print_log(f"[{index}/{N} Error] Please check this scene --> {id_scan}")
error_scan[id_scan] = 0
continue
# DUMP COLLECT RESULTS
if dump:
scene_path = os.path.join(collect_path, f'{id_scan}')
if not os.path.exists(scene_path):
os.mkdir(scene_path)
print("Created scene directory: {}".format(scene_path))
s2c_utils.write_scene_results(points=point_cloud, ins_points=instance_vertices, num_instances=K, bboxes=None, file_path=scene_path)
point_cloud = np.ascontiguousarray(point_cloud[:, :3] - point_cloud[:, :3].mean(0))
pcoord = point_cloud.astype(np.float64)
colors = colors.astype(np.float32)
sem_labels = semantic_vertices.astype(np.float64)
ins_labels = instance_vertices.astype(np.float64)
# ============ DUMP ============
# scene data
file_path = os.path.join(self.out_path, id_scan+'_inst.pth')
torch.save((pcoord, colors, sem_labels, ins_labels), file_path)
print(f"[{index}/{N} Saved] {id_scan} >>> {file_path}")
# error scan
with open(self.out_path+'/error_scan.txt', 'w') as f:
print_log("ERROR SCAN")
for i, sname in enumerate(error_scan.keys()):
print('{:2d}: {}'.format(i, sname))
f.write(sname)
f.write('\n')
if __name__ == "__main__":
Dataset = Scan2CADCollect(split_set='all', distr_check=True)
N = len(Dataset)
Dataset.collect(N, dump=False) | [((12, 11, 12, 38), 'os.path.dirname', 'os.path.dirname', ({(12, 27, 12, 37): 'BASE_DIR_1'}, {}), '(BASE_DIR_1)', False, 'import os, sys\n'), ((13, 11, 13, 36), 'os.path.dirname', 'os.path.dirname', ({(13, 27, 13, 35): 'BASE_DIR'}, {}), '(BASE_DIR)', False, 'import os, sys\n'), ((14, 11, 14, 36), 'os.path.dirname', 'os.path.dirname', ({(14, 27, 14, 35): 'ROOT_DIR'}, {}), '(ROOT_DIR)', False, 'import os, sys\n'), ((15, 11, 15, 44), 'os.path.join', 'os.path.join', ({(15, 24, 15, 32): 'DATA_DIR', (15, 34, 15, 43): '"""Dataset"""'}, {}), "(DATA_DIR, 'Dataset')", False, 'import os, sys\n'), ((16, 11, 16, 41), 'os.path.join', 'os.path.join', ({(16, 24, 16, 32): 'ROOT_DIR', (16, 34, 16, 40): '"""data"""'}, {}), "(ROOT_DIR, 'data')", False, 'import os, sys\n'), ((17, 0, 17, 25), 'sys.path.append', 'sys.path.append', ({(17, 16, 17, 24): 'BASE_DIR'}, {}), '(BASE_DIR)', False, 'import os, sys\n'), ((18, 0, 18, 25), 'sys.path.append', 'sys.path.append', ({(18, 16, 18, 24): 'ROOT_DIR'}, {}), '(ROOT_DIR)', False, 'import os, sys\n'), ((26, 5, 26, 28), 's2c_config.Scan2CADDatasetConfig', 'Scan2CADDatasetConfig', ({}, {}), '()', False, 'from s2c_config import Scan2CADDatasetConfig\n'), ((36, 15, 36, 30), 'numpy.array', 'np.array', ({(36, 24, 36, 29): '[INF]'}, {}), '([INF])', True, 'import numpy as np\n'), ((11, 29, 11, 54), 'os.path.abspath', 'os.path.abspath', ({(11, 45, 11, 53): '__file__'}, {}), '(__file__)', False, 'import os, sys\n'), ((24, 16, 24, 59), 'os.path.join', 'os.path.join', ({(24, 29, 24, 37): 'ROOT_DIR', (24, 39, 24, 58): '"""models/retrieval/"""'}, {}), "(ROOT_DIR, 'models/retrieval/')", False, 'import os, sys\n'), ((43, 11, 43, 38), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((51, 8, 51, 45), 'numpy.quaternion', 'np.quaternion', ({(51, 22, 51, 26): 'q[0]', (51, 28, 51, 32): 'q[1]', (51, 34, 51, 38): 'q[2]', (51, 40, 51, 44): 'q[3]'}, {}), '(q[0], q[1], q[2], q[3])', True, 'import numpy as np\n'), ((52, 10, 52, 42), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', ({(52, 40, 52, 41): 'q'}, {}), '(q)', False, 'import quaternion\n'), ((60, 13, 60, 41), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((61, 16, 61, 41), 'torch.min', 'torch.min', (), '', False, 'import torch\n'), ((65, 8, 65, 45), 'numpy.quaternion', 'np.quaternion', ({(65, 22, 65, 26): 'q[0]', (65, 28, 65, 32): 'q[1]', (65, 34, 65, 38): 'q[2]', (65, 40, 65, 44): 'q[3]'}, {}), '(q[0], q[1], q[2], q[3])', True, 'import numpy as np\n'), ((66, 8, 66, 17), 'numpy.eye', 'np.eye', ({(66, 15, 66, 16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((68, 8, 68, 17), 'numpy.eye', 'np.eye', ({(68, 15, 68, 16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((69, 18, 69, 50), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', ({(69, 48, 69, 49): 'q'}, {}), '(q)', False, 'import quaternion\n'), ((70, 8, 70, 17), 'numpy.eye', 'np.eye', ({(70, 15, 70, 16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((71, 18, 71, 28), 'numpy.diag', 'np.diag', ({(71, 26, 71, 27): 's'}, {}), '(s)', True, 'import numpy as np\n'), ((79, 8, 79, 17), 'numpy.eye', 'np.eye', ({(79, 15, 79, 16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((81, 8, 81, 17), 'numpy.eye', 'np.eye', ({(81, 15, 81, 16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((82, 18, 82, 50), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', ({(82, 48, 82, 49): 'q'}, {}), '(q)', False, 'import quaternion\n'), ((83, 8, 83, 17), 'numpy.eye', 'np.eye', ({(83, 15, 83, 
16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((84, 18, 84, 28), 'numpy.diag', 'np.diag', ({(84, 26, 84, 27): 's'}, {}), '(s)', True, 'import numpy as np\n'), ((86, 8, 86, 17), 'numpy.eye', 'np.eye', ({(86, 15, 86, 16): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((95, 9, 95, 34), 'numpy.linalg.norm', 'np.linalg.norm', ({(95, 24, 95, 33): 'R[0:3, (0)]'}, {}), '(R[0:3, (0)])', True, 'import numpy as np\n'), ((96, 9, 96, 34), 'numpy.linalg.norm', 'np.linalg.norm', ({(96, 24, 96, 33): 'R[0:3, (1)]'}, {}), '(R[0:3, (1)])', True, 'import numpy as np\n'), ((97, 9, 97, 34), 'numpy.linalg.norm', 'np.linalg.norm', ({(97, 24, 97, 33): 'R[0:3, (2)]'}, {}), '(R[0:3, (2)])', True, 'import numpy as np\n'), ((99, 8, 99, 30), 'numpy.array', 'np.array', ({(99, 17, 99, 29): '[sx, sy, sz]'}, {}), '([sx, sy, sz])', True, 'import numpy as np\n'), ((105, 8, 105, 52), 'quaternion.from_rotation_matrix', 'quaternion.from_rotation_matrix', ({(105, 40, 105, 51): 'R[0:3, 0:3]'}, {}), '(R[0:3, 0:3])', False, 'import quaternion\n'), ((78, 12, 78, 49), 'numpy.quaternion', 'np.quaternion', ({(78, 26, 78, 30): 'q[0]', (78, 32, 78, 36): 'q[1]', (78, 38, 78, 42): 'q[2]', (78, 44, 78, 48): 'q[3]'}, {}), '(q[0], q[1], q[2], q[3])', True, 'import numpy as np\n'), ((117, 25, 117, 66), 'os.path.join', 'os.path.join', ({(117, 38, 117, 46): 'DATA_DIR', (117, 48, 117, 65): '"""Scan2CAD/export"""'}, {}), "(DATA_DIR, 'Scan2CAD/export')", False, 'import os, sys\n'), ((118, 24, 118, 57), 'os.path.join', 'os.path.join', ({(118, 37, 118, 47): 'BASE_DIR_1', (118, 49, 118, 56): '"""data4"""'}, {}), "(BASE_DIR_1, 'data4')", False, 'import os, sys\n'), ((180, 23, 180, 41), 'numpy.unique', 'np.unique', ({(180, 33, 180, 40): 'cat_ids'}, {}), '(cat_ids)', True, 'import numpy as np\n'), ((222, 23, 222, 56), 'os.path.join', 'os.path.join', ({(222, 36, 222, 44): 'BASE_DIR', (222, 46, 222, 55): '"""collect"""'}, {}), "(BASE_DIR, 'collect')", False, 'import os, sys\n'), ((119, 15, 119, 44), 'os.path.exists', 'os.path.exists', ({(119, 30, 119, 43): 'self.out_path'}, {}), '(self.out_path)', False, 'import os, sys\n'), ((120, 12, 120, 35), 'os.mkdir', 'os.mkdir', ({(120, 21, 120, 34): 'self.out_path'}, {}), '(self.out_path)', False, 'import os, sys\n'), ((148, 19, 148, 31), 'json.load', 'json.load', ({(148, 29, 148, 30): 'f'}, {}), '(f)', False, 'import json\n'), ((287, 23, 287, 51), 'numpy.unique', 'np.unique', ({(287, 33, 287, 50): 'instance_vertices'}, {}), '(instance_vertices)', True, 'import numpy as np\n'), ((309, 24, 309, 72), 'os.path.join', 'os.path.join', ({(309, 37, 309, 50): 'self.out_path', (309, 52, 309, 71): "id_scan + '_inst.pth'"}, {}), "(self.out_path, id_scan + '_inst.pth')", False, 'import os, sys\n'), ((310, 12, 310, 75), 'torch.save', 'torch.save', ({(310, 23, 310, 63): '(pcoord, colors, sem_labels, ins_labels)', (310, 65, 310, 74): 'file_path'}, {}), '((pcoord, colors, sem_labels, ins_labels), file_path)', False, 'import torch\n'), ((236, 32, 236, 79), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((237, 32, 237, 79), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((242, 28, 242, 75), 'numpy.array', 'np.array', ({(242, 37, 242, 74): "data['models'][model]['trs']['scale']"}, {}), "(data['models'][model]['trs']['scale'])", True, 'import numpy as np\n'), ((243, 39, 243, 56), 'numpy.sum', 'np.sum', ({(243, 46, 243, 55): 'obj_scale'}, {}), '(obj_scale)', True, 'import numpy as np\n'), ((256, 29, 256, 70), 'numpy.array', 'np.array', ({(256, 38, 256, 69): 
"data['models'][model]['center']"}, {}), "(data['models'][model]['center'])", True, 'import numpy as np\n'), ((257, 34, 257, 87), 'numpy.array', 'np.array', ({(257, 43, 257, 86): "data['models'][model]['trs']['translation']"}, {}), "(data['models'][model]['trs']['translation'])", True, 'import numpy as np\n'), ((258, 31, 258, 81), 'numpy.array', 'np.array', ({(258, 40, 258, 80): "data['models'][model]['trs']['rotation']"}, {}), "(data['models'][model]['trs']['rotation'])", True, 'import numpy as np\n'), ((259, 28, 259, 75), 'numpy.array', 'np.array', ({(259, 37, 259, 74): "data['models'][model]['trs']['scale']"}, {}), "(data['models'][model]['trs']['scale'])", True, 'import numpy as np\n'), ((265, 31, 265, 43), 'numpy.array', 'np.array', ({(265, 40, 265, 42): '[]'}, {}), '([])', True, 'import numpy as np\n'), ((266, 27, 266, 66), 'numpy.array', 'np.array', ({(266, 36, 266, 65): "data['models'][model]['bbox']"}, {}), "(data['models'][model]['bbox'])", True, 'import numpy as np\n'), ((267, 30, 267, 91), 's2c_utils.get_3d_box_rotated', 's2c_utils.get_3d_box_rotated', (), '', False, 'import s2c_utils\n'), ((268, 42, 268, 97), 's2c_utils.extract_pc_in_box3d', 's2c_utils.extract_pc_in_box3d', ({(268, 72, 268, 83): 'point_cloud', (268, 85, 268, 96): 'obj_corners'}, {}), '(point_cloud, obj_corners)', False, 'import s2c_utils\n'), ((272, 20, 272, 118), 's2c_utils.filter_dominant_cls', 's2c_utils.filter_dominant_cls', ({(272, 50, 272, 61): 'point_cloud', (272, 63, 272, 75): 'obj_vert_ind', (272, 77, 272, 92): 'semantic_labels', (272, 94, 272, 103): 'sem_cls + 1', (272, 105, 272, 117): 'NOT_CARED_ID'}, {}), '(point_cloud, obj_vert_ind, semantic_labels, \n sem_cls + 1, NOT_CARED_ID)', False, 'import s2c_utils\n'), ((295, 29, 295, 69), 'os.path.join', 'os.path.join', ({(295, 42, 295, 54): 'collect_path', (295, 56, 295, 68): 'f"""{id_scan}"""'}, {}), "(collect_path, f'{id_scan}')", False, 'import os, sys\n'), ((299, 16, 299, 147), 's2c_utils.write_scene_results', 's2c_utils.write_scene_results', (), '', False, 'import s2c_utils\n'), ((231, 38, 231, 75), 'os.path.join', 'os.path.join', ({(231, 51, 231, 65): 'self.data_path', (231, 67, 231, 74): 'id_scan'}, {}), '(self.data_path, id_scan)', False, 'import os, sys\n'), ((232, 38, 232, 75), 'os.path.join', 'os.path.join', ({(232, 51, 232, 65): 'self.data_path', (232, 67, 232, 74): 'id_scan'}, {}), '(self.data_path, id_scan)', False, 'import os, sys\n'), ((288, 16, 288, 41), 'numpy.max', 'np.max', ({(288, 23, 288, 40): 'instance_vertices'}, {}), '(instance_vertices)', True, 'import numpy as np\n'), ((296, 23, 296, 49), 'os.path.exists', 'os.path.exists', ({(296, 38, 296, 48): 'scene_path'}, {}), '(scene_path)', False, 'import os, sys\n'), ((297, 20, 297, 40), 'os.mkdir', 'os.mkdir', ({(297, 29, 297, 39): 'scene_path'}, {}), '(scene_path)', False, 'import os, sys\n'), ((123, 35, 123, 54), 'os.path.basename', 'os.path.basename', ({(123, 52, 123, 53): 'x'}, {}), '(x)', False, 'import os, sys\n'), ((124, 21, 124, 47), 'os.listdir', 'os.listdir', ({(124, 32, 124, 46): 'self.data_path'}, {}), '(self.data_path)', False, 'import os, sys\n')] |
ryokbys/nap | nappy/msd2diff.py | ddd0b5a5a956f7c335a22adb4f8e00f1d38a7804 | #!/usr/bin/env python
"""
Compute diffusion coefficient from MSD data.
Time interval, DT, is obtained from in.pmd in the same directory.
Usage:
msd2diff.py [options] MSD_FILE
Options:
-h, --help Show this message and exit.
-o, --offset OFFSET
Offset of given data. [default: 0]
--plot Plot a fitted graph. [default: False]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
__author__ = "RYO KOBAYASHI"
__version__ = "191212"
def read_out_msd(fname='out.msd',offset=0,specorder=[],spc=None):
if specorder == [] or spc not in specorder:
index = 1
else:
index = specorder.index(spc) +1
with open(fname,'r') as f:
lines = f.readlines()
try:
dname = os.path.dirname(fname)
dt = dt_from_inpmd(fname=dname+'/in.pmd')
except Exception as e:
raise RuntimeError('Failed to read in.pmd.')
ts = []
msds = []
n0 = 0
msd0 = 0.0
for il,line in enumerate(lines):
if line[0] == '#':
continue
data = line.split()
if il < offset:
n0 = int(data[0])
msd0 = float(data[index])
continue
n = int(data[0])
msd = float(data[index])
ts.append((n-n0)*dt)
msds.append(msd-msd0)
return np.array(ts),np.array(msds)
def dt_from_inpmd(fname='in.pmd'):
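    """Read in.pmd and return the time interval between successive MSD records (time_interval*num_iteration/num_out_pos)."""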
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if 'time_interval' in line:
time_interval = abs(float(line.split()[1]))
elif 'num_iteration' in line:
num_iteration = int(line.split()[1])
elif 'num_out_pos' in line or 'num_out_pmd' in line:
num_out_pos = int(line.split()[1])
return time_interval*num_iteration/num_out_pos
def msd2D(ts,msds,fac,dim=3):
"""
Compute diffusion coefficient from time [fs] vs MSD [Ang^2] data
by solving least square problem using numpy.
Return diffusion coefficient multiplied by FAC.
"""
    A = np.array([ts, np.ones(len(ts))])
A = A.T
xvar = np.var(A[:,0])
p,res,_,_ = np.linalg.lstsq(A,msds,rcond=None)
a = p[0]
b = p[1]
# fac = 1.0e-16 /1.e-15
a = a *fac /(2.0*dim)
b = b *fac
# print(res[0],xvar,np.mean(A[:,0]),len(ts))
std = np.sqrt(res[0]/len(ts)/xvar) *fac /(2.0*dim)
return a,b,std
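# A minimal usage sketch (not part of the original script): recover a known
# diffusion coefficient from synthetic, noise-free MSD data using msd2D.
# D_true and the ideal 3D relation MSD = 6*D*t are illustrative assumptions.
def _example_msd2D():
    import numpy as np
    fac = 1.0e-16 /1.0e-15              # A^2/fs -> cm^2/s, same factor as in __main__
    D_true = 2.0e-5                     # cm^2/s, arbitrary reference value
    ts = np.linspace(0.0, 1.0e+4, 100)  # times in fs
    msds = 6.0*D_true/fac *ts           # ideal 3D MSD in A^2
    a,b,std = msd2D(ts,msds,fac)
    # a ~ D_true, b ~ 0 and std ~ 0 for this noise-free data
    return a,b,std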
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['MSD_FILE']
offset = int(args['--offset'])
plot = args['--plot']
ts,msds = read_out_msd(fname,offset)
#...Assuming input MSD unit in A^2/fs and output in cm^2/s
fac = 1.0e-16 /1.0e-15
#...Least square
a,b,std = msd2D(ts,msds,fac)
print(' Diffusion coefficient = {0:12.4e}'.format(a)+
' +/- {0:12.4e} [cm^2/s]'.format(std))
if plot:
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='talk',style='ticks')
#...Original time unit == fs
unit = 'fs'
tfac = 1.0
if ts[-1] > 1.0e+5: #...if max t > 100ps, time unit in ps
unit = 'ps'
tfac = 1.0e-3
plt.xlabel('Time ({0:s})'.format(unit))
plt.ylabel('MSD (A^2/{0:s})'.format(unit))
fvals = np.array([ (t*a+b)/fac for t in ts ])
plt.plot(ts*tfac,msds/tfac,'b-',label='MSD data')
plt.plot(ts*tfac,fvals/tfac,'r-',label='Fitted curve')
plt.savefig("graph_msd2D.png", format='png',
dpi=300, bbox_inches='tight')
print(' Wrote graph_msd2D.png')
| [((77, 11, 77, 25), 'numpy.var', 'np.var', ({(77, 18, 77, 24): 'A[:, (0)]'}, {}), '(A[:, (0)])', True, 'import numpy as np\n'), ((78, 16, 78, 50), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (), '', True, 'import numpy as np\n'), ((91, 11, 91, 26), 'docopt.docopt', 'docopt', ({(91, 18, 91, 25): '__doc__'}, {}), '(__doc__)', False, 'from docopt import docopt\n'), ((34, 16, 34, 38), 'os.path.dirname', 'os.path.dirname', ({(34, 32, 34, 37): 'fname'}, {}), '(fname)', False, 'import os, sys\n'), ((54, 11, 54, 23), 'numpy.array', 'np.array', ({(54, 20, 54, 22): 'ts'}, {}), '(ts)', True, 'import numpy as np\n'), ((54, 24, 54, 38), 'numpy.array', 'np.array', ({(54, 33, 54, 37): 'msds'}, {}), '(msds)', True, 'import numpy as np\n'), ((107, 8, 107, 45), 'seaborn.set', 'sns.set', (), '', True, 'import seaborn as sns\n'), ((116, 16, 116, 53), 'numpy.array', 'np.array', ({(116, 25, 116, 52): '[((t * a + b) / fac) for t in ts]'}, {}), '([((t * a + b) / fac) for t in ts])', True, 'import numpy as np\n'), ((117, 8, 117, 57), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((118, 8, 118, 62), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((119, 8, 120, 49), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'import matplotlib.pyplot as plt\n')] |
jcsesznegi/advent-of-code-2017 | 5/part2.py | 9710e184e092b82aa798076b9ce3915c6e42758d | import os
f = open(os.path.join(os.path.dirname(__file__), '../input/5/part2.txt'), 'r')
class InstructionSet:
def __init__(self, instructions):
self.instructions = instructions
self.currentIndex = 0
self.numberSteps = 0
def _changeOffsetValue(self, index):
if self.instructions[index] >= 3:
self.instructions[index] -= 1
else:
self.instructions[index] += 1
def jump(self):
self.numberSteps += 1
jumpNumber = self.instructions[self.currentIndex]
oldIndex = self.currentIndex
self.currentIndex += jumpNumber
self._changeOffsetValue(oldIndex)
def run(self):
while (self.currentIndex >= 0
and self.currentIndex < len(self.instructions)):
self.jump()
def main():
def formatLine(line):
return int(line.rstrip())
line = f.readline()
instructions = []
while line:
instructions.append(formatLine(line))
line = f.readline()
instructionSet = InstructionSet(instructions)
instructionSet.run()
print(instructionSet.numberSteps)
if __name__ == '__main__':
main()
| [((3, 22, 3, 47), 'os.path.dirname', 'os.path.dirname', ({(3, 38, 3, 46): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
MhmdRyhn/behavior_test | features/steps/basic_account_add_bdd.py | 868252e0b31596e0bff4a969745cf3b633c13695 | import behave
@behave.when('I add $1200 to my account')
def add_usd_1200(context):
context.account.add_cash(amount=1200)
@behave.then('It becomes $3200 in my account')
def check_for_increase_to_usd_3200(context):
assert context.account.current_cash == 3200
| [((4, 1, 4, 41), 'behave.when', 'behave.when', ({(4, 13, 4, 40): '"""I add $1200 to my account"""'}, {}), "('I add $1200 to my account')", False, 'import behave\n'), ((9, 1, 9, 46), 'behave.then', 'behave.then', ({(9, 13, 9, 45): '"""It becomes $3200 in my account"""'}, {}), "('It becomes $3200 in my account')", False, 'import behave\n')] |
naveengh6/blinkpy | tests/test_sync_module.py | e821687f2b7590b13532ac596c31e8eaa6c7b69a | """Tests camera and system functions."""
import unittest
from unittest import mock
from blinkpy.blinkpy import Blink
from blinkpy.helpers.util import BlinkURLHandler
from blinkpy.sync_module import BlinkSyncModule, BlinkOwl
from blinkpy.camera import BlinkCamera, BlinkCameraMini
@mock.patch("blinkpy.auth.Auth.query")
class TestBlinkSyncModule(unittest.TestCase):
"""Test BlinkSyncModule functions in blinkpy."""
def setUp(self):
"""Set up Blink module."""
self.blink = Blink(motion_interval=0)
self.blink.last_refresh = 0
self.blink.urls = BlinkURLHandler("test")
self.blink.sync["test"] = BlinkSyncModule(self.blink, "test", "1234", [])
self.camera = BlinkCamera(self.blink.sync)
self.mock_start = [
{
"syncmodule": {
"id": 1234,
"network_id": 5678,
"serial": "12345678",
"status": "foobar",
}
},
{"event": True},
{},
{},
None,
{"devicestatus": {}},
]
self.blink.sync["test"].network_info = {"network": {"armed": True}}
def tearDown(self):
"""Clean up after test."""
self.blink = None
self.camera = None
self.mock_start = None
def test_bad_status(self, mock_resp):
"""Check that we mark module unavaiable on bad status."""
self.blink.sync["test"].status = None
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].online)
self.assertFalse(self.blink.sync["test"].available)
def test_bad_arm(self, mock_resp):
"""Check that we mark module unavaiable if bad arm status."""
self.blink.sync["test"].network_info = None
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].network_info = {}
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
def test_get_events(self, mock_resp):
"""Test get events function."""
mock_resp.return_value = {"event": True}
self.assertEqual(self.blink.sync["test"].get_events(), True)
def test_get_events_fail(self, mock_resp):
"""Test handling of failed get events function."""
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_events())
mock_resp.return_value = {}
self.assertFalse(self.blink.sync["test"].get_events())
def test_get_camera_info(self, mock_resp):
"""Test get camera info function."""
mock_resp.return_value = {"camera": ["foobar"]}
self.assertEqual(self.blink.sync["test"].get_camera_info("1234"), "foobar")
def test_get_camera_info_fail(self, mock_resp):
"""Test handling of failed get camera info function."""
mock_resp.return_value = None
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {"camera": None}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
def test_get_network_info(self, mock_resp):
"""Test network retrieval."""
mock_resp.return_value = {"network": {"sync_module_error": False}}
self.assertTrue(self.blink.sync["test"].get_network_info())
mock_resp.return_value = {"network": {"sync_module_error": True}}
self.assertFalse(self.blink.sync["test"].get_network_info())
def test_get_network_info_failure(self, mock_resp):
"""Test failed network retrieval."""
mock_resp.return_value = {}
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].available = True
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
def test_check_new_videos_startup(self, mock_resp):
"""Test that check_new_videos does not block startup."""
sync_module = self.blink.sync["test"]
self.blink.last_refresh = None
self.assertFalse(sync_module.check_new_videos())
def test_check_new_videos(self, mock_resp):
"""Test recent video response."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 0
self.assertEqual(sync_module.motion, {})
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
self.assertEqual(sync_module.motion, {"foo": True})
mock_resp.return_value = {"media": []}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
def test_check_new_videos_old_date(self, mock_resp):
"""Test videos return response with old date."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_no_motion_if_not_armed(self, mock_resp):
"""Test that motion detection is not set if module unarmed."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
sync_module.network_info = {"network": {"armed": False}}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_multiple_videos(self, mock_resp):
"""Test motion found even with multiple videos."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/bar/foo.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/foobar.mp4",
"created_at": "1970-01-01T00:00:01+00:00",
},
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
expected_result = {
"foo": {"clip": "/bar/foo.mp4", "time": "1990-01-01T00:00:00+00:00"}
}
self.assertEqual(sync_module.last_record, expected_result)
def test_check_new_videos_failed(self, mock_resp):
"""Test method when response is unexpected."""
mock_resp.side_effect = [None, "just a string", {}]
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
def test_sync_start(self, mock_resp):
"""Test sync start function."""
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].name, "test")
self.assertEqual(self.blink.sync["test"].sync_id, 1234)
self.assertEqual(self.blink.sync["test"].network_id, 5678)
self.assertEqual(self.blink.sync["test"].serial, "12345678")
self.assertEqual(self.blink.sync["test"].status, "foobar")
def test_unexpected_summary(self, mock_resp):
"""Test unexpected summary response."""
self.mock_start[0] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_no_network_id(self, mock_resp):
"""Test handling of bad summary."""
self.mock_start[0]["syncmodule"] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_only_network_id(self, mock_resp):
"""Test handling of sparse summary."""
self.mock_start[0]["syncmodule"] = {"network_id": 8675309}
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].network_id, 8675309)
def test_unexpected_camera_info(self, mock_resp):
"""Test unexpected camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = None
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_missing_camera_info(self, mock_resp):
"""Test missing key from camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = {}
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_sync_attributes(self, mock_resp):
"""Test sync attributes."""
self.assertEqual(self.blink.sync["test"].attributes["name"], "test")
self.assertEqual(self.blink.sync["test"].attributes["network_id"], "1234")
def test_owl_start(self, mock_resp):
"""Test owl camera instantiation."""
response = {
"name": "foo",
"id": 2,
"serial": "foobar123",
"enabled": True,
"network_id": 1,
"thumbnail": "/foo/bar",
}
self.blink.last_refresh = None
self.blink.homescreen = {"owls": [response]}
owl = BlinkOwl(self.blink, "foo", 1234, response)
self.assertTrue(owl.start())
self.assertTrue("foo" in owl.cameras)
self.assertEqual(owl.cameras["foo"].__class__, BlinkCameraMini)
| [((11, 1, 11, 38), 'unittest.mock.patch', 'mock.patch', ({(11, 12, 11, 37): '"""blinkpy.auth.Auth.query"""'}, {}), "('blinkpy.auth.Auth.query')", False, 'from unittest import mock\n'), ((17, 21, 17, 45), 'blinkpy.blinkpy.Blink', 'Blink', (), '', False, 'from blinkpy.blinkpy import Blink\n'), ((19, 26, 19, 49), 'blinkpy.helpers.util.BlinkURLHandler', 'BlinkURLHandler', ({(19, 42, 19, 48): '"""test"""'}, {}), "('test')", False, 'from blinkpy.helpers.util import BlinkURLHandler\n'), ((20, 34, 20, 81), 'blinkpy.sync_module.BlinkSyncModule', 'BlinkSyncModule', ({(20, 50, 20, 60): 'self.blink', (20, 62, 20, 68): '"""test"""', (20, 70, 20, 76): '"""1234"""', (20, 78, 20, 80): '[]'}, {}), "(self.blink, 'test', '1234', [])", False, 'from blinkpy.sync_module import BlinkSyncModule, BlinkOwl\n'), ((21, 22, 21, 50), 'blinkpy.camera.BlinkCamera', 'BlinkCamera', ({(21, 34, 21, 49): 'self.blink.sync'}, {}), '(self.blink.sync)', False, 'from blinkpy.camera import BlinkCamera, BlinkCameraMini\n'), ((291, 14, 291, 57), 'blinkpy.sync_module.BlinkOwl', 'BlinkOwl', ({(291, 23, 291, 33): 'self.blink', (291, 35, 291, 40): '"""foo"""', (291, 42, 291, 46): '1234', (291, 48, 291, 56): 'response'}, {}), "(self.blink, 'foo', 1234, response)", False, 'from blinkpy.sync_module import BlinkSyncModule, BlinkOwl\n')] |
naylor-b/dymos | dymos/examples/min_time_climb/aero/aero.py | 56ee72041056ae20c3332d060e291c4da93844b1 | from __future__ import absolute_import
import numpy as np
from openmdao.api import Group
from .dynamic_pressure_comp import DynamicPressureComp
from .lift_drag_force_comp import LiftDragForceComp
from .cd0_comp import CD0Comp
from .kappa_comp import KappaComp
from .cla_comp import CLaComp
from .cl_comp import CLComp
from .cd_comp import CDComp
from .mach_comp import MachComp
class AeroGroup(Group):
"""
The purpose of the AeroGroup is to compute the aerodynamic forces on the
aircraft in the body frame.
Parameters
----------
v : float
air-relative velocity (m/s)
sos : float
local speed of sound (m/s)
rho : float
atmospheric density (kg/m**3)
alpha : float
angle of attack (rad)
S : float
aerodynamic reference area (m**2)
"""
def initialize(self):
self.options.declare('num_nodes', types=int,
desc='Number of nodes to be evaluated in the RHS')
def setup(self):
nn = self.options['num_nodes']
self.add_subsystem(name='mach_comp',
subsys=MachComp(num_nodes=nn),
promotes_inputs=['v', 'sos'],
promotes_outputs=['mach'])
self.add_subsystem(name='cd0_comp',
subsys=CD0Comp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['CD0'])
self.add_subsystem(name='kappa_comp',
subsys=KappaComp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['kappa'])
self.add_subsystem(name='cla_comp',
subsys=CLaComp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['CLa'])
self.add_subsystem(name='CL_comp',
subsys=CLComp(num_nodes=nn),
promotes_inputs=['alpha', 'CLa'],
promotes_outputs=['CL'])
self.add_subsystem(name='CD_comp',
subsys=CDComp(num_nodes=nn),
promotes_inputs=['CD0', 'alpha', 'CLa', 'kappa'],
promotes_outputs=['CD'])
self.add_subsystem(name='q_comp',
subsys=DynamicPressureComp(num_nodes=nn),
promotes_inputs=['rho', 'v'],
promotes_outputs=['q'])
self.add_subsystem(name='lift_drag_force_comp',
subsys=LiftDragForceComp(num_nodes=nn),
promotes_inputs=['CL', 'CD', 'q', 'S'],
promotes_outputs=['f_lift', 'f_drag'])
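# Hypothetical usage sketch (not part of dymos): wiring AeroGroup into a standalone
# OpenMDAO Problem to evaluate lift and drag at a single flight condition.
# The numeric values below are illustrative assumptions, not model defaults.
def _example_aero_group(num_nodes=3):
    from openmdao.api import Problem
    prob = Problem()
    prob.model.add_subsystem('aero', AeroGroup(num_nodes=num_nodes), promotes=['*'])
    prob.setup()
    prob['v'] = 250.0       # airspeed, m/s
    prob['sos'] = 340.0     # speed of sound, m/s
    prob['rho'] = 1.225     # density, kg/m**3
    prob['alpha'] = 0.05    # angle of attack, rad
    prob['S'] = 49.24       # reference area, m**2
    prob.run_model()
    return prob['f_lift'], prob['f_drag']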
| [] |
jakeb1996/SBS | stats.py | 3bcc0017d22674d4290be1b272aeac4836f0d5ec | import matplotlib.pyplot as plt
import argparse, csv, numpy, time, os, re
def main(resultsFile, toolName):
filesToCalc = []
toolNames = []
if os.path.isfile(resultsFile):
# the user must have defined an exact file to plot
filesToCalc.append(resultsFile)
toolNames.append(toolName)
else:
# check if there are multiple files matching the criteria
dir = (os.sep).join(resultsFile.split(os.sep)[:-1])
fileNameStart = resultsFile.split(os.sep)[-1]
for (dirpath, dirnames, filenames) in os.walk(dir):
for filename in filenames:
reMatch = re.search('%s_((aggregate|system)|(\d)+)\\b' % fileNameStart, filename)
if bool(reMatch):
filesToCalc.append(os.path.join(dirpath, filename))
toolNames.append('%s %s' %(toolName, reMatch.group(1).title()))
# start plotting
i = 0
while i < len(filesToCalc):
stat(filesToCalc[i], toolNames[i])
i = i + 1
def stat(resultsFile, toolName):
print 'Running for: %s\n' % toolName
TIME_ELAPSED = []
TIME_GAPS = []
config = {
'data-type-default' : int
}
# the aggregate functions to perform on each set. each is a function name.
# user-defined functions at bottom of file
stats = [len, min, q1, median, mean, q3, max, std]
measurements = {
# measurement configurations must appear in the order of the associated CSV columns
# --- sample ---
# 'stat_name' : {
# ['data-type' : float,]
# 'data' : [],
# 'title' : 'measurement title'
# },
# --- end sample ---
### START CHILD PROCESS STATS ###
'time' : {
'data' : [],
'data-type' : float,
'title' : 'Time'
},
'num_threads' : {
'data' : [],
'title' : 'Number of Threads'
},
'cpu_percent' : {
'data' : [],
'data-type' : float,
'title' : 'CPU Utilisation'
},
'mem_rss' : {
'data' : [],
'data-type' : float,
'title' : 'Resident Set Size (RSS) Memory Utilisation'
},
'mem_vms' : {
'data' : [],
'title' : 'Virtual Memory Size (VMS) Memory Utilisation'
},
'io_read_count' : {
'data' : [],
'title' : 'Disk IO Read Count'
},
'io_read_bytes' : {
'data' : [],
'title' : 'Disk IO Read Volume'
},
'io_write_count' : {
'data' : [],
'title' : 'Disk IO Write Count'
},
'io_write_bytes' : {
'data' : [],
'title' : 'Disk IO Write Volume'
},
'child_process_count' : {
'data' : [],
'title' : 'Child Process Count'
},
### START SYSTEM STATS ###
# if the stat was defined above, then don't define it again
'mem_used' : {
'data' : [],
'data-type' : float,
'title' : 'Physical Memory Used (megabytes)'
},
'mem_avai' : {
'data' : [],
'data-type' : float,
'title' : 'Physical Memory Available (megabytes)',
},
'process_count' : {
'data' : [],
'title' : 'Process Count'
}
}
# due to dictionaries not being in order, we need to know the order the data appears and
# match it with the associated plot configuration above.
headerOrder = []
# put all the times in a list
timeRecords = []
with open(resultsFile, 'r') as fcsv:
dataCsv = csv.reader(fcsv, delimiter=',')
# Set the headerOrder and remove the time column header
headerOrder = dataCsv.next()
firstTime = None
for row in dataCsv:
# Elapsed time
timeRecords.append(float(row[0]))
TIME_ELAPSED.append(float(row[0]) - float(timeRecords[0]))
if firstTime == False:
TIME_GAPS.append(float(row[0]) - measurements['time']['data'][-1])
i = 0 # skip zero as its the time (as above)
for measurement in headerOrder:
if 'data-type' in measurements[measurement]:
measurements[measurement]['data'].append(measurements[measurement]['data-type'](row[i]))
else:
measurements[measurement]['data'].append(config['data-type-default'](row[i]))
i += 1
firstTime = False
if len(timeRecords) == 0:
print 'No data recorded in %s.\nExiting.\n\n' % resultsFile
return 0
resultsFileName = '%s_stats.csv' % resultsFile
with open(resultsFileName, 'w') as scsv:
print 'Writing to file: %s' % resultsFileName
# write headers line
scsv.write('measurement,%s\n' % ','.join(map(funcName, stats)))
for measurement in headerOrder:
line = '%s' % measurement
for stat in stats:
line = ('%s,%s' % (line, stat(measurements[measurement]['data'])))
scsv.write('%s\n' % line)
# now, because the time gaps were calculated separately, run the stats on them tool
# messy, I know. sorry!
line = '%s' % 'time_gaps'
for stat in stats:
line = ('%s,%s' % (line, stat(TIME_GAPS)))
scsv.write('%s\n' % line)
# write start and end time
scsv.write('start_time,%s,"%s"\nend_time,%s,"%s"\ntime_elapsed,%s,sec,%s,min' % (timeRecords[0], time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timeRecords[0])), timeRecords[-1], time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timeRecords[-1])), (timeRecords[-1] - timeRecords[0]), ((timeRecords[-1] - timeRecords[0]) / 60)))
print '\nFinished.'
def q1(seq):
return numpy.percentile(seq, 25)
def median(seq):
return numpy.percentile(seq, 50)
def mean(seq):
return sum(seq) / len(seq)
def q3(seq):
return numpy.percentile(seq, 75)
def std(seq):
return numpy.std(seq)
def funcName(func):
return func.__name__
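# Illustrative sketch (not used by the script): apply the same aggregate functions
# listed in `stats` inside stat() to a small sample series.
def _example_aggregates():
    sample = [1.0, 2.0, 2.0, 3.0, 10.0]
    funcs = [len, min, q1, median, mean, q3, max, std]
    # yields pairs such as ('median', 2.0), ('mean', 3.6), ...
    return [(func.__name__, func(sample)) for func in funcs]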
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Plotter for the Software Benchmarking Script')
parser.add_argument('-f', help='Results file as input (in csv format)')
parser.add_argument('-t', help='Name of tool', default=None)
parser.add_argument('--wincntxmnu', help='Indicates SBS stats was launched from the Windows context menu. See README for help.', action='store_true')
args = parser.parse_args()
# Not used
#if args.wincntxmnu:
# args.t = raw_input('Enter the plot prefix: ')
main(args.f, args.t)
| [] |
andrey18106/vocabulary_bot | callback_handlers.py | 68a5835fb69e255df1766c2ed5c5228daaa4f06f | # -*- coding: utf-8 -*-
# ===== Default imports =====
import asyncio
import logging
# ===== External libs imports =====
from aiogram import Bot, Dispatcher, types
from aiogram.dispatcher import FSMContext
# ===== Local imports =====
from analytics import BotAnalytics
from db_manager import DbManager
from lang_manager import LangManager
from markups_manager import MarkupManager
from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState
from states.Mailing import AdminMailingState
import pagination
class VocabularyBotCallbackHandler:
"""Class for Vocabulary Bot callback handlers"""
def __init__(self, db_manager: DbManager, lang_manager: LangManager, markup_manager: MarkupManager,
analytics: BotAnalytics, dispatcher: Dispatcher, bot: Bot):
self.db = db_manager
self.lang = lang_manager
self.markup = markup_manager
self.analytics = analytics
self.dp = dispatcher
self.bot = bot
self.__init_handlers()
def __init_handlers(self):
# CALLBACK HANDLER FOR USER LANGUAGE SETTINGS
@self.dp.callback_query_handler(lambda query: query.data.startswith('lang_setting_'))
@self.analytics.callback_metric
async def language_settings_callback_handler(query: types.CallbackQuery):
"""Handle selecting preferred interface language"""
user_lang = self.lang.parse_user_lang(query['from']['id'])
selected_lang = query['data'][-2:]
if selected_lang != user_lang:
self.db.set_user_lang(query['from']['id'], selected_lang)
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('LANG_SETTINGS', 'SUCCESS', selected_lang),
reply_markup=self.markup.get_main_menu_markup(selected_lang))
await query.answer()
else:
await query.answer(self.lang.get_page_text('LANG_SETTINGS', 'ERROR', user_lang), show_alert=True)
@self.dp.callback_query_handler(lambda query: query.data.startswith('help_question_'))
@self.analytics.callback_metric
async def help_callback_handler(query: types.CallbackQuery):
"""Handle HELP page question buttons"""
user_id = query['from']['id']
user_lang = self.lang.parse_user_lang(user_id)
question = query['data']
await query.message.edit_text(self.lang.get_page_text("HELP", question, user_lang))
await query.message.edit_reply_markup(self.markup.get_help_back_markup(user_lang))
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data == 'back_to_help')
@self.analytics.callback_metric
async def back_to_help_callback_handler(query: types.CallbackQuery):
"""Handle HELP page question back button"""
user_id = query['from']['id']
user_lang = self.lang.parse_user_lang(user_id)
await query.message.edit_text(self.lang.get_page_text("HELP", "TEXT", user_lang))
await query.message.edit_reply_markup(self.markup.get_help_markup(user_lang))
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('settings_'))
@self.analytics.callback_metric
async def settings_page_callback_handler(query: types.CallbackQuery):
"""Handle SETTINGS page buttons"""
user_id = query['from']['id']
user_lang = self.lang.parse_user_lang(user_id)
page = query['data'][9:]
if page == 'interface':
await query.message.edit_text(self.lang.get_page_text("LANG_SETTINGS", "TEXT", user_lang))
await query.message.edit_reply_markup(self.markup.get_lang_settings_markup(user_lang))
await query.answer()
elif page == 'newsletters':
await query.message.edit_text(self.lang.get_page_text("NEWSLETTER_SETTINGS", "TEXT", user_lang))
await query.message.edit_reply_markup(self.markup.get_news_settings_markup(user_lang))
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('news_setting_'))
@self.analytics.callback_metric
        async def news_settings_callback_handler(query: types.CallbackQuery):
"""Newsletters settings"""
user_lang = self.lang.parse_user_lang(query['from']['id'])
selected_option = query['data'][13:]
user_mailings = self.db.get_user_mailings(query['from']['id'])
mailings_settings = ['disable', 'important', 'all']
if mailings_settings[user_mailings] != selected_option:
if selected_option == 'all' and user_mailings != 2:
self.db.set_user_mailings(query['from']['id'], 2)
elif selected_option == 'important' and user_mailings != 1:
self.db.set_user_mailings(query['from']['id'], 1)
elif selected_option == 'disable' and user_mailings != 0:
self.db.set_user_mailings(query['from']['id'], 0)
await query.message.delete()
await query.message.answer(self.lang.get_page_text("NEWSLETTER_SETTINGS", "SUCCESS", user_lang))
else:
await query.answer(self.lang.get_page_text('NEWSLETTER_SETTINGS', 'ALREADY_SET', user_lang),
show_alert=True)
async def _send_dictionary_page(message: types.Message, user_id: int, user_lang: str, from_lang: str,
to_lang: str, state: FSMContext):
current_state = {
'current_page': 0,
'from_lang': from_lang,
'to_lang': to_lang
}
paginator = getattr(pagination, 'dictionary'.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
user_id,
current_page=current_state)
await message.answer(text=self.lang.get_page_text('DICTIONARY', 'TEXT', user_lang),
reply_markup=self.markup.get_dictionary_markup(user_lang))
await message.answer(text=paginator.first_page(user_lang), reply_markup=paginator.get_reply_markup())
async with state.proxy() as data:
data['curr_pagination_page'] = current_state
await DictionaryState.dictionary.set()
@self.dp.callback_query_handler(lambda query: query.data.startswith('dictionary_'), state="*")
@self.analytics.callback_fsm_metric
async def dictionary_list_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
selected_dict_pairs = query.data[11:].split('_')
from_lang = selected_dict_pairs[0]
to_lang = selected_dict_pairs[1]
await query.message.delete()
await _send_dictionary_page(query.message, query['from']['id'], user_lang, from_lang, to_lang, state)
# PAGINATION
@self.dp.callback_query_handler(lambda query: query.data.startswith('first_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_first_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[6:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_first():
await query.message.edit_text(text=paginator.first_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'FIRST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('prev_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_prev_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[5:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_first():
await query.message.edit_text(text=paginator.prev_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'FIRST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('next_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_next_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[5:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_last():
await query.message.edit_text(text=paginator.next_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'LAST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('last_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_last_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[5:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_last():
await query.message.edit_text(text=paginator.last_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'LAST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data == 'profile_referral_link')
@self.analytics.callback_metric
async def profile_referral_link_callback_handler(query: types.CallbackQuery):
user_lang = self.lang.parse_user_lang(query['from']['id'])
await query.message.answer(self.lang.get_user_referral_link_page(query['from']['id'], user_lang))
await query.message.edit_reply_markup(None)
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('mailings_'))
@self.analytics.callback_metric
async def admin_mailings_new_callback_handler(query: types.CallbackQuery):
user_lang = self.lang.parse_user_lang(query['from']['id'])
action = query['data'][9:]
if action == 'new':
await AdminMailingState.message.set()
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('MAILINGS', 'NEW', user_lang),
reply_markup=self.markup.get_cancel_markup())
elif action == 'schedule_list':
await query.answer()
# QUIZ CALLBACKS
@self.dp.callback_query_handler(lambda query: query.data == 'quiz_start', state="*")
@self.analytics.callback_fsm_metric
async def quiz_start_callback_handler(query: types.CallbackQuery, state: FSMContext):
await query.answer()
await query.message.delete()
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
from_lang = data['curr_pagination_page']['from_lang']
to_lang = data['curr_pagination_page']['to_lang']
quiz_data = self.db.get_user_quiz_data(query['from']['id'], from_lang, to_lang, 10)
await DictionaryQuizState.user_answers.set()
async with state.proxy() as data:
data['quiz_results'] = []
data['quiz_data'] = quiz_data
data['index'] = 1
question = f"{data['index']}/{len(data['quiz_data'])} " + \
self.lang.get_page_text('QUIZ', 'QUESTION', user_lang).format(quiz_data[0]['word'])
await self.bot.send_poll(chat_id=query['from']['id'],
question=question,
options=quiz_data[0]['options'],
correct_option_id=quiz_data[0]['options'].index(quiz_data[0]['answer']),
type='quiz',
reply_markup=self.markup.get_quiz_next_markup(user_lang))
@self.dp.callback_query_handler(state=DictionaryQuizState.user_answers)
@self.analytics.callback_fsm_metric
async def quiz_next_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
if query.message.poll.total_voter_count == 1:
await query.answer()
await query.message.delete()
async with state.proxy() as data:
curr_q_index = data['index']
quiz_result = {
'word': data['quiz_data'][curr_q_index - 1]['word'],
'selected_option': query.message.poll.options.index(
list(filter(lambda item: item.voter_count == 1,
query.message.poll.options))[0]),
'correct_option': query.message.poll.correct_option_id,
'options': list(map(lambda item: dict(item), query.message.poll.options))
}
data['quiz_results'].append(quiz_result)
if curr_q_index < len(data['quiz_data']) - 1:
data['index'] = curr_q_index + 1
question = f"{data['index']}/{len(data['quiz_data'])} "
else:
question = f"{len(data['quiz_data'])}/{len(data['quiz_data'])} "
await DictionaryQuizState.finish.set()
question += self.lang.get_page_text('QUIZ', 'QUESTION', user_lang).format(
data['quiz_data'][curr_q_index]['word'])
await self.bot.send_poll(chat_id=query['from']['id'],
question=question,
options=data['quiz_data'][curr_q_index]['options'],
correct_option_id=data['quiz_data'][curr_q_index]['options'].index(
data['quiz_data'][curr_q_index]['answer']),
type='quiz',
reply_markup=self.markup.get_quiz_next_markup(user_lang)
if curr_q_index != len(data['quiz_data']) - 1 else
self.markup.get_quiz_finish_markup(user_lang))
else:
await query.answer(self.lang.get_page_text('QUIZ', 'NON_SELECTED', user_lang),
show_alert=True)
@self.dp.callback_query_handler(state=DictionaryQuizState.finish)
@self.analytics.callback_fsm_metric
async def quiz_finish_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
if query.message.poll.total_voter_count == 1:
await query.answer()
await query.message.delete()
async with state.proxy() as data:
quiz_result = {
'word': data['quiz_data'][data['index']]['word'],
'selected_option': query.message.poll.options.index(
list(filter(lambda item: item.voter_count == 1,
query.message.poll.options))[0]),
'correct_option': query.message.poll.correct_option_id,
'options': list(map(lambda item: dict(item), query.message.poll.options))
}
data['quiz_results'].append(quiz_result)
await query.message.answer(self.lang.get_page_text('QUIZ', 'FINISH', user_lang))
await query.message.answer(self.lang.get_quiz_results_page(data['quiz_results'], user_lang),
parse_mode='Markdown')
last_pagination_page = data['curr_pagination_page']
await state.finish()
await DictionaryState.dictionary.set()
async with state.proxy() as data:
data['curr_pagination_page'] = last_pagination_page
else:
await query.answer(self.lang.get_page_text('QUIZ', 'NON_SELECTED', user_lang),
show_alert=True)
@self.dp.callback_query_handler(state=DictionarySearchWordState.search_query)
@self.analytics.callback_fsm_metric
async def search_word_actions_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
action = query.data[10:]
if action == 'add':
async with state.proxy() as data:
new_word_string = data['search_query']
new_word_translation = data['translation']
from_lang = data['curr_pagination_page']['from_lang']
to_lang = data['curr_pagination_page']['to_lang']
self.db.add_user_word(new_word_string, new_word_translation, query['from']['id'], from_lang,
to_lang)
await query.message.edit_text(self.lang.get_page_text('ADD_WORD', 'SUCCESSFUL_ADDED', user_lang))
await state.finish()
await asyncio.sleep(1)
await _send_dictionary_page(query.message, query['from']['id'], user_lang, from_lang, to_lang, state)
elif action == 'find_another':
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('FIND_WORD', 'WELCOME_TEXT', user_lang),
reply_markup=self.markup.get_cancel_markup())
@self.dp.callback_query_handler(state=DictionaryEditWordState.search_query)
@self.analytics.callback_metric
async def edit_word_actions_callback_handler(query: types.CallbackQuery):
user_lang = self.lang.parse_user_lang(query['from']['id'])
action = query.data[10:]
if action == 'string':
await DictionaryEditWordState.new_word_string.set()
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('EDIT_WORD', 'NEW_STRING', user_lang),
reply_markup=self.markup.get_cancel_markup())
elif action == 'translation':
await DictionaryEditWordState.new_word_translation.set()
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('EDIT_WORD', 'NEW_TRANSLATION', user_lang),
reply_markup=self.markup.get_cancel_markup())
| [((127, 18, 127, 50), 'states.Dictionary.DictionaryState.dictionary.set', 'DictionaryState.dictionary.set', ({}, {}), '()', False, 'from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState\n'), ((260, 18, 260, 56), 'states.Dictionary.DictionaryQuizState.user_answers.set', 'DictionaryQuizState.user_answers.set', ({}, {}), '()', False, 'from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState\n'), ((242, 22, 242, 53), 'states.Mailing.AdminMailingState.message.set', 'AdminMailingState.message.set', ({}, {}), '()', False, 'from states.Mailing import AdminMailingState\n'), ((335, 22, 335, 54), 'states.Dictionary.DictionaryState.dictionary.set', 'DictionaryState.dictionary.set', ({}, {}), '()', False, 'from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState\n'), ((357, 22, 357, 38), 'asyncio.sleep', 'asyncio.sleep', ({(357, 36, 357, 37): '(1)'}, {}), '(1)', False, 'import asyncio\n'), ((370, 22, 370, 67), 'states.Dictionary.DictionaryEditWordState.new_word_string.set', 'DictionaryEditWordState.new_word_string.set', ({}, {}), '()', False, 'from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState\n'), ((375, 22, 375, 72), 'states.Dictionary.DictionaryEditWordState.new_word_translation.set', 'DictionaryEditWordState.new_word_translation.set', ({}, {}), '()', False, 'from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState\n'), ((297, 30, 297, 62), 'states.Dictionary.DictionaryQuizState.finish.set', 'DictionaryQuizState.finish.set', ({}, {}), '()', False, 'from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState\n')] |
karolinanikolova/SoftUni-Software-Engineering | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/06_Nested-Loops/02.Exercise-06-Special-Numbers.py | 7891924956598b11a1e30e2c220457c85c40f064 | # 6. Специални числа
# Да се напише програма, която чете едно цяло число N, въведено от потребителя, и генерира всички възможни "специални"
# числа от 1111 до 9999. За да бъде “специално” едно число, то трябва да отговаря на следното условие:
# • N да се дели на всяка една от неговите цифри без остатък.
# Пример: при N = 16, 2418 е специално число:
# • 16 / 2 = 8 без остатък
# • 16 / 4 = 4 без остатък
# • 16 / 1 = 16 без остатък
# • 16 / 8 = 2 без остатък
N = int(input())
for number in range(1111, 9999 + 1):
is_number_special = True
number_as_string = str(number)
# Could also write for index, digit in enumerate(number_as_string): but since we don't need the index we don't need enumerate.
for digit in number_as_string:
if int(digit) == 0 or N % int(digit) != 0:
is_number_special = False
break
if is_number_special:
print(f'{number_as_string}', end = ' ')
| [] |
sqbl/scikit-optimize | skopt/tests/test_transformers.py | c1866d5a9ad67efe93ac99736bfc2dc659b561d4 | import pytest
import numbers
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises_regex
from skopt.space import LogN, Normalize
@pytest.mark.fast_test
def test_logn2_integer():
transformer = LogN(2)
for X in range(2, 31):
X_orig = transformer.inverse_transform(transformer.transform(X))
assert_array_equal(int(np.round(X_orig)), X)
@pytest.mark.fast_test
def test_logn10_integer():
    transformer = LogN(10)
for X in range(2, 31):
X_orig = transformer.inverse_transform(transformer.transform(X))
assert_array_equal(int(np.round(X_orig)), X)
@pytest.mark.fast_test
def test_normalize_integer():
transformer = Normalize(1, 20, is_int=True)
assert transformer.transform(19.8) == 1.0
assert transformer.transform(20.2) == 1.0
assert transformer.transform(1.2) == 0.0
assert transformer.transform(0.9) == 0.0
assert_raises(ValueError, transformer.transform, 20.6)
assert_raises(ValueError, transformer.transform, 0.4)
assert transformer.inverse_transform(0.99) == 20
assert transformer.inverse_transform(0.01) == 1
assert_raises(ValueError, transformer.inverse_transform, 1. + 1e-8)
assert_raises(ValueError, transformer.transform, 0. - 1e-8)
@pytest.mark.fast_test
def test_normalize():
transformer = Normalize(1, 20, is_int=False)
assert transformer.transform(20.) == 1.0
assert transformer.transform(1.) == 0.0
assert_raises(ValueError, transformer.transform, 20. + 1e-7)
assert_raises(ValueError, transformer.transform, 1.0 - 1e-7)
assert_raises(ValueError, transformer.inverse_transform, 1. + 1e-8)
assert_raises(ValueError, transformer.transform, 0. - 1e-8)
| [((14, 18, 14, 25), 'skopt.space.LogN', 'LogN', ({(14, 23, 14, 24): '2'}, {}), '(2)', False, 'from skopt.space import LogN, Normalize\n'), ((22, 18, 22, 25), 'skopt.space.LogN', 'LogN', ({(22, 23, 22, 24): '2'}, {}), '(2)', False, 'from skopt.space import LogN, Normalize\n'), ((30, 18, 30, 47), 'skopt.space.Normalize', 'Normalize', (), '', False, 'from skopt.space import LogN, Normalize\n'), ((35, 4, 35, 58), 'numpy.testing.assert_raises', 'assert_raises', ({(35, 18, 35, 28): 'ValueError', (35, 30, 35, 51): 'transformer.transform', (35, 53, 35, 57): '(20.6)'}, {}), '(ValueError, transformer.transform, 20.6)', False, 'from numpy.testing import assert_raises\n'), ((36, 4, 36, 57), 'numpy.testing.assert_raises', 'assert_raises', ({(36, 18, 36, 28): 'ValueError', (36, 30, 36, 51): 'transformer.transform', (36, 53, 36, 56): '(0.4)'}, {}), '(ValueError, transformer.transform, 0.4)', False, 'from numpy.testing import assert_raises\n'), ((40, 4, 40, 71), 'numpy.testing.assert_raises', 'assert_raises', ({(40, 18, 40, 28): 'ValueError', (40, 30, 40, 59): 'transformer.inverse_transform', (40, 61, 40, 70): '(1.0 + 1e-08)'}, {}), '(ValueError, transformer.inverse_transform, 1.0 + 1e-08)', False, 'from numpy.testing import assert_raises\n'), ((41, 4, 41, 63), 'numpy.testing.assert_raises', 'assert_raises', ({(41, 18, 41, 28): 'ValueError', (41, 30, 41, 51): 'transformer.transform', (41, 53, 41, 62): '(0.0 - 1e-08)'}, {}), '(ValueError, transformer.transform, 0.0 - 1e-08)', False, 'from numpy.testing import assert_raises\n'), ((46, 18, 46, 48), 'skopt.space.Normalize', 'Normalize', (), '', False, 'from skopt.space import LogN, Normalize\n'), ((49, 4, 49, 64), 'numpy.testing.assert_raises', 'assert_raises', ({(49, 18, 49, 28): 'ValueError', (49, 30, 49, 51): 'transformer.transform', (49, 53, 49, 63): '(20.0 + 1e-07)'}, {}), '(ValueError, transformer.transform, 20.0 + 1e-07)', False, 'from numpy.testing import assert_raises\n'), ((50, 4, 50, 64), 'numpy.testing.assert_raises', 'assert_raises', ({(50, 18, 50, 28): 'ValueError', (50, 30, 50, 51): 'transformer.transform', (50, 53, 50, 63): '(1.0 - 1e-07)'}, {}), '(ValueError, transformer.transform, 1.0 - 1e-07)', False, 'from numpy.testing import assert_raises\n'), ((51, 4, 51, 71), 'numpy.testing.assert_raises', 'assert_raises', ({(51, 18, 51, 28): 'ValueError', (51, 30, 51, 59): 'transformer.inverse_transform', (51, 61, 51, 70): '(1.0 + 1e-08)'}, {}), '(ValueError, transformer.inverse_transform, 1.0 + 1e-08)', False, 'from numpy.testing import assert_raises\n'), ((52, 4, 52, 63), 'numpy.testing.assert_raises', 'assert_raises', ({(52, 18, 52, 28): 'ValueError', (52, 30, 52, 51): 'transformer.transform', (52, 53, 52, 62): '(0.0 - 1e-08)'}, {}), '(ValueError, transformer.transform, 0.0 - 1e-08)', False, 'from numpy.testing import assert_raises\n'), ((17, 31, 17, 47), 'numpy.round', 'np.round', ({(17, 40, 17, 46): 'X_orig'}, {}), '(X_orig)', True, 'import numpy as np\n'), ((25, 31, 25, 47), 'numpy.round', 'np.round', ({(25, 40, 25, 46): 'X_orig'}, {}), '(X_orig)', True, 'import numpy as np\n')] |
dspoka/mnm | tokenization_numerical.py | f212e8d5697a4556c6469d469a2930b203667828 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import sys
import unicodedata
from io import open
from transformers import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip('\n')
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertNumericalTokenizer(PreTrainedTokenizer):
r"""
Constructs a BertTokenizer.
:class:`~pytorch_transformers.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,
unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs):
"""Constructs a BertNumericalTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input
Only has an effect when do_basic_tokenize=True
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization before wordpiece.
**never_split**: (`optional`) list of string
List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
super(BertNumericalTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token,
pad_token=pad_token, cls_token=cls_token,
mask_token=mask_token, **kwargs)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertNumericalTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.unk_num = '[UNK_NUM]'
self.default_value = 1.0
never_split = ['[UNK_NUM]']
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
self.numerical_tokenizer = NumericalTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token, unk_num=self.unk_num)
@property
def vocab_size(self):
return len(self.vocab)
def _tokenize(self, text, get_values=False, get_sigfigs=None, get_numeric_masks=None):
split_tokens = []
numeric_values = []
numeric_masks = []
split_sigfigs = []
i = 0
for (token, sigfig) in self.numerical_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for (sub_token, numeric_value, numeric_mask) in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
numeric_values.append(numeric_value)
numeric_masks.append(numeric_mask)
if numeric_value != self.default_value:
split_sigfigs.append(sigfig)
else:
split_sigfigs.append('-1')
if numeric_value != self.default_value and sub_token != self.unk_num:
                    raise ValueError('unexpected numeric value %s for sub-token %s' % (numeric_value, sub_token))
if get_numeric_masks:
return numeric_masks
if get_values:
return numeric_values
assert len(split_tokens) == len(numeric_values) == len(split_sigfigs)
if get_sigfigs:
return split_sigfigs
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str/unicode) in an id using the vocab. """
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (string/unicode) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return (vocab_file,)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
""" Instantiate a BertNumericalTokenizer from pre-trained vocabulary files.
"""
if pretrained_model_name_or_path in PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES:
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
return super(BertNumericalTokenizer, cls)._from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
class NumericalTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text, never_split=None):
""" Basic Numerical Tokenization of a piece of text.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
# digits = '0123456789'
# punctuation = '$%'
# text = self._clean_text(text)
# orig_tokens = whitespace_tokenize(text)
split_tokens, split_sigfigs = normalize_numbers_in_sent(text)
output_tokens = whitespace_tokenize(" ".join(split_tokens))
output_sigfigs = whitespace_tokenize(" ".join(split_sigfigs))
return zip(output_tokens,split_sigfigs)
# return output_tokens,
# _numbers = '[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?'
# fraction_pattern = re.compile(_fraction)
# number_pattern = re.compile(_numbers)
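# Hedged usage sketch (added; assumes whitespace_tokenize and normalize_numbers_in_sent
# behave as defined elsewhere in this module): number words are rewritten as digit strings
# and paired with the "significant figure" text that produced them, e.g.
#   nt = NumericalTokenizer()
#   list(nt.tokenize("it cost four million dollars"))
#   # -> [("it", "-1"), ("cost", "-1"), ("4000000.0", "four million"), ("dollars", "-1")]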
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
self.tokenize_chinese_chars = tokenize_chinese_chars
def tokenize(self, text, never_split=None):
""" Basic Tokenization of a piece of text.
Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
never_split = self.never_split + (never_split if never_split is not None else [])
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
            # don't split on a period if a number comes before it
# if _is_punctuation(char) and not chars[i-1].isdigit() or _is_punctuation(char) and i == 0:
if _is_punctuation(char):
if i == 0:
do_split = True
elif i == len(chars)-1:
do_split = True
else:
if not chars[i-1].isdigit():
do_split = True
else:
do_split = False
else:
do_split = False
if do_split:
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, unk_num, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.unk_num = unk_num
self.default_value = 1.0
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
numeric_values = []
numeric_mask = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
numeric_values.append(self.default_value)
numeric_mask.append(0)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
try:
if token not in ['infinity', 'inf', 'nan']:
numeric_value = float(token)
is_number = True
else:
is_number = False
                except ValueError:
                    is_number = False
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab and is_number == False:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_number:
#ACTUAL NUMBER HERE
output_tokens.append(self.unk_num)
numeric_values.append(numeric_value)
numeric_mask.append(1)
elif is_bad:
output_tokens.append(self.unk_token)
numeric_values.append(self.default_value)#-9e9
numeric_mask.append(0)
else:
numeric_values.extend([self.default_value]*len(sub_tokens))#-9e9
numeric_mask.extend([0]*len(sub_tokens))
output_tokens.extend(sub_tokens)
assert len(numeric_values) == len(output_tokens) == len(numeric_mask)
return zip(output_tokens, numeric_values, numeric_mask)
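    # Minimal sketch with a hypothetical vocab (added for illustration): ordinary words are
    # greedily split into wordpieces with the default value and mask 0, while parseable
    # numbers collapse to unk_num carrying their value and mask 1:
    #   wp = WordpieceTokenizer(vocab={"un": 0, "##aff": 1, "##able": 2},
    #                           unk_token="[UNK]", unk_num="[UNK_NUM]")
    #   list(wp.tokenize("unaffable"))  # -> [("un", 1.0, 0), ("##aff", 1.0, 0), ("##able", 1.0, 0)]
    #   list(wp.tokenize("42"))         # -> [("[UNK_NUM]", 42.0, 1)]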
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
# if cat.startswith("P") and cp != 46:
if cat.startswith("P"):
return True
return False
################
#
Small = {
'zero': 0.0,
'one': 1.0,
'two': 2.0,
'three': 3.0,
'four': 4.0,
'five': 5.0,
'six': 6.0,
'seven': 7.0,
'eight': 8.0,
'nine': 9.0,
'ten': 10.0,
'eleven': 11.0,
'twelve': 12.0,
'thirteen': 13.0,
'fourteen': 14.0,
'fifteen': 15.0,
'sixteen': 16.0,
'seventeen': 17.0,
'eighteen': 18.0,
'nineteen': 19.0,
'twenty': 20.0,
'thirty': 30.0,
'forty': 40.0,
'fifty': 50.0,
'sixty': 60.0,
'seventy': 70.0,
'eighty': 80.0,
'ninety': 90.0
}
Magnitude = {
'thousand': 1000.0,
'million': 1000000.0,
'billion': 1000000000.0,
'trillion': 1000000000000.0,
'quadrillion': 1000000000000000.0,
'quintillion': 1000000000000000000.0,
'sextillion': 1000000000000000000000.0,
'septillion': 1000000000000000000000000.0,
'octillion': 1000000000000000000000000000.0,
'nonillion': 1000000000000000000000000000000.0,
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def text2num(sent):
if type(sent) is str:
words = [word.lower() for word in sent.strip().split()]
elif type(sent) is list:
words = [word.lower() for word in sent]
# n = 0
# g = 0
mantissa = 0
# number = 0.0
for i, word in enumerate(words):
if i == 0:
mantissa = Small.get(word, None)
if mantissa is None:
try:
mantissa = float(word)
except ValueError:
raise NumberException("First must be a number of sorts")
elif i != 0:
magnitude = Magnitude.get(word, None)
if magnitude is not None:
mantissa = mantissa*magnitude
else: # non-number word
raise NumberException("Unknown number: "+word)
return mantissa
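# Worked examples (added for clarity): the first word supplies the mantissa and every
# following word must be a magnitude that multiplies it:
#   text2num("four million")  # -> 4000000.0
#   text2num("24 thousand")   # -> 24000.0
#   text2num("four dollars")  # raises NumberException("Unknown number: dollars")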
def generate_ngrams(sentence, n):
return zip(*[sentence[i:] for i in range(n)])
def check_int(s):
if s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()
def preprocess(sent, remove_pos=False, never_split=None):
"""
Preprocess the sentence by:
. remove commas from numbers (2,000 -> 2000)
. remove endings from ordinal numbers (2nd -> 2)
. convert "a {hundred,thousand...}" to "one {hundred,thousand,...}" so it can be handled by text2num function
. convert "digit digitword" (24 hundred) -> 2400
and return the sentence's preprocessed list of words that should be passed into text2num.
"""
if remove_pos:
words = [word[:word.rfind('_')] for word in sent.strip().split()]
else:
words = [word for word in sent.strip().split()]
tokenizer = BasicTokenizer(do_lower_case=True, never_split=never_split)
words = tokenizer.tokenize(sent)
# sent = ' '.join(tokens)
words_lower = [word.lower() for word in words]
# remove commas from numbers "2,000" -> 2000 and remove endings from ordinal numbers
for i in range(len(words)):
new_word = words_lower[i].replace(',', '')
if new_word.endswith(('th', 'rd', 'st', 'nd')):
new_word = new_word[:-2]
try:
if new_word not in ['infinity', 'inf', 'nan']:
int_word = float(new_word)
# words[i] = str(int_word)
words[i] = new_word
except ValueError:
pass # only modify this word if it's an int after preprocessing
Magnitude_with_hundred = Magnitude.copy()
Magnitude_with_hundred['hundred'] = 100
# convert "a {hundred,thousand,million,...}" to "one {hundred,thousand,million,...}"
for i in range(len(words)-1):
if words_lower[i] == 'a' and words_lower[i+1] in Magnitude_with_hundred:
words[i] = 'one'
# convert "24 {Magnitude}" -> 24000000000000 (mix of digits and words)
new_words = []
sigs = []
i = 0
while i < len(words)-1:
if check_int(words_lower[i]) and words_lower[i+1] in Magnitude_with_hundred:
new_words.append(str(float(words_lower[i]) * Magnitude_with_hundred[words_lower[i+1]]))
sigs.append(f'{words_lower[i]} {words_lower[i+1]}')
i += 1
else:
new_words.append(words[i])
sigs.append('')
if i == len(words) - 2:
new_words.append(words[i+1])
sigs.append('')
i += 1
return new_words, sigs
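# Hedged example of the normalisations listed in the docstring (added; relies on
# BasicTokenizer keeping "2,000" as one token because a digit precedes the comma):
#   preprocess("He paid 2,000 dollars")
#   # -> (["he", "paid", "2000", "dollars"], ["", "", "", ""])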
#
#
def normalize_numbers_in_sent(sent, remove_pos=False, never_split=None):
"""
Given a sentence, perform preprocessing and normalize number words to digits.
:param sent: sentence (str)
:return: a list of normalized words from the sentence
"""
out_words = []
words, sigfigs = preprocess(sent, remove_pos, never_split)
out_sigfigs = []
i = 0
while i < len(words):
for j in range(len(words), i, -1):
try:
number = str(text2num(words[i:j]))
if sigfigs[i] == '':
out_sigfigs.append(' '.join(words[i:j]))
else:
out_sigfigs.append(sigfigs[i])
out_words.append(number)
i = j-1 # skip this sequence since we replaced it with a number
break
except NumberException:
if j == i+1:
out_sigfigs.append('-1')
out_words.append(words[i])
i += 1
assert len(out_sigfigs) == len(out_words)
return out_words, out_sigfigs | [((27, 9, 27, 36), 'logging.getLogger', 'logging.getLogger', ({(27, 27, 27, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((68, 12, 68, 37), 'collections.OrderedDict', 'collections.OrderedDict', ({}, {}), '()', False, 'import collections\n'), ((533, 10, 533, 36), 'unicodedata.category', 'unicodedata.category', ({(533, 31, 533, 35): 'char'}, {}), '(char)', False, 'import unicodedata\n'), ((545, 10, 545, 36), 'unicodedata.category', 'unicodedata.category', ({(545, 31, 545, 35): 'char'}, {}), '(char)', False, 'import unicodedata\n'), ((561, 10, 561, 36), 'unicodedata.category', 'unicodedata.category', ({(561, 31, 561, 35): 'char'}, {}), '(char)', False, 'import unicodedata\n'), ((69, 9, 69, 48), 'io.open', 'open', (), '', False, 'from io import open\n'), ((214, 11, 214, 36), 'os.path.isdir', 'os.path.isdir', ({(214, 25, 214, 35): 'vocab_path'}, {}), '(vocab_path)', False, 'import os\n'), ((344, 15, 344, 49), 'unicodedata.normalize', 'unicodedata.normalize', ({(344, 37, 344, 42): '"""NFD"""', (344, 44, 344, 48): 'text'}, {}), "('NFD', text)", False, 'import unicodedata\n'), ((130, 15, 130, 41), 'os.path.isfile', 'os.path.isfile', ({(130, 30, 130, 40): 'vocab_file'}, {}), '(vocab_file)', False, 'import os\n'), ((215, 25, 215, 82), 'os.path.join', 'os.path.join', ({(215, 38, 215, 48): 'vocab_path', (215, 50, 215, 81): "VOCAB_FILES_NAMES['vocab_file']"}, {}), "(vocab_path, VOCAB_FILES_NAMES['vocab_file'])", False, 'import os\n'), ((216, 13, 216, 52), 'io.open', 'open', (), '', False, 'from io import open\n'), ((347, 18, 347, 44), 'unicodedata.category', 'unicodedata.category', ({(347, 39, 347, 43): 'char'}, {}), '(char)', False, 'import unicodedata\n')] |
wheelerMT/spin-1_BEC | dipole/splitting_dipole.py | e8ea34699b4001847c6b4c7451c11be241ce598f | import numpy as np
import multiprocessing as mp
import pyfftw
from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan
from numpy import heaviside as heav
from include import helper
import h5py
# ---------Spatial and potential parameters--------------
Mx = My = 64
Nx = Ny = 128 # Number of grid pts
dx = dy = 1 / 2 # Grid spacing
dkx = pi / (Mx * dx)
dky = pi / (My * dy) # K-space spacing
len_x = Nx * dx # Box length
len_y = Ny * dy
x = np.arange(-Mx, Mx) * dx
y = np.arange(-My, My) * dy
X, Y = np.meshgrid(x, y) # Spatial meshgrid
data = h5py.File('../data/splitting_dipole_data.hdf5', 'a')
data.create_dataset('grid/x', x.shape, data=x)
data.create_dataset('grid/y', y.shape, data=y)
kx = np.fft.fftshift(np.arange(-Mx, Mx) * dkx)
ky = np.fft.fftshift(np.arange(-My, My) * dky)
Kx, Ky = np.meshgrid(kx, ky) # K-space meshgrid
# Initialising FFTs
cpu_count = mp.cpu_count()
wfn_data = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
fft_forward = pyfftw.FFTW(wfn_data, wfn_data, axes=(0, 1), threads=cpu_count)
fft_backward = pyfftw.FFTW(wfn_data, wfn_data, direction='FFTW_BACKWARD', axes=(0, 1), threads=cpu_count)
# Framework for wavefunction data
psi_plus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_0_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_minus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
# Controlled variables
V = 0. # Doubly periodic box
p = q = 0.
c0 = 2
c1 = 0.5 # Effective 3-component BEC
k = 0 # Array index
# ------------------------------ Generating SQV's -------------------------
# Euler angles
alpha = 0.
beta = pi / 4
gamma = 0.
N_vort = 2 # Number of vortices
pos = [-10, 0, 10, 0]
theta_k = np.empty((N_vort, Nx, Ny))
theta_tot = np.empty((Nx, Ny))
for k in range(N_vort // 2):
# Scaling positional arguments
Y_minus = 2 * pi * (Y - pos[k]) / len_y
X_minus = 2 * pi * (X - pos[N_vort // 2 + k]) / len_x
Y_plus = 2 * pi * (Y - pos[N_vort + k]) / len_y
X_plus = 2 * pi * (X - pos[3 * N_vort // 2 + k]) / len_x
x_plus = 2 * pi * pos[3 * N_vort // 2 + k] / len_x
x_minus = 2 * pi * pos[N_vort // 2 + k] / len_x
for nn in np.arange(-5, 5):
theta_k[k, :, :] += arctan(
tanh((Y_minus + 2 * pi * nn) / 2) * tan((X_minus - pi) / 2)) \
- arctan(tanh((Y_plus + 2 * pi * nn) / 2) * tan((X_plus - pi) / 2)) \
+ pi * (heav(X_plus, 1.) - heav(X_minus, 1.))
theta_k[k, :, :] -= (2 * pi * Y / len_y) * (x_plus - x_minus) / (2 * pi)
theta_tot += theta_k[k, :, :]
# Initial wavefunction
Psi = np.empty((3, Nx, Ny), dtype='complex128')
Psi[0, :, :] = np.zeros((Nx, Ny)) + 0j
Psi[1, :, :] = np.ones((Nx, Ny), dtype='complex128') * exp(1j * theta_tot)
Psi[2, :, :] = np.zeros((Nx, Ny)) + 0j
psi_plus, psi_0, psi_minus = helper.rotation(Psi, Nx, Ny, alpha, beta, gamma) # Performs rotation to wavefunction
# Aligning wavefunction to potentially speed up FFTs
pyfftw.byte_align(psi_plus)
pyfftw.byte_align(psi_0)
pyfftw.byte_align(psi_minus)
# ------------------------------------------------------------------------
# Normalisation constants
N_plus = dx * dy * np.linalg.norm(psi_plus) ** 2
N_0 = dx * dy * np.linalg.norm(psi_0) ** 2
N_minus = dx * dy * np.linalg.norm(psi_minus) ** 2
# Time steps, number and wavefunction save variables
Nt = 80000
Nframe = 200
dt = 5e-3
t = 0.
# Saving time variables:
data.create_dataset('time/Nt', data=Nt)
data.create_dataset('time/dt', data=dt)
data.create_dataset('time/Nframe', data=Nframe)
# Setting up variables to be sequentially saved:
psi_plus_save = data.create_dataset('wavefunction/psi_plus', (Nx, Ny, Nt/Nframe), dtype='complex128')
psi_0_save = data.create_dataset('wavefunction/psi_0', (Nx, Ny, Nt/Nframe), dtype='complex128')
psi_minus_save = data.create_dataset('wavefunction/psi_minus', (Nx, Ny, Nt/Nframe), dtype='complex128')
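# Descriptive note (added): each iteration below applies a Strang-type split step: a half
# kinetic/quadratic-Zeeman update in k-space, the trap, linear-Zeeman and spin-interaction
# update in real space, then a second half kinetic update, followed by renormalisation of
# the three components; a snapshot is written every Nframe iterations.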
for i in range(Nt):
# Spin vector terms:
F_perp = sqrt(2.) * (conj(psi_plus) * psi_0 + conj(psi_0) * psi_minus)
Fz = abs(psi_plus) ** 2 - abs(psi_minus) ** 2
F = sqrt(abs(Fz) ** 2 + abs(F_perp) ** 2) # Magnitude of spin vector
# Total density
n = abs(psi_minus) ** 2 + abs(psi_0) ** 2 + abs(psi_plus) ** 2
# Sin and cosine terms for solution
C = cos(c1 * F * (-1j * dt))
if F.min() == 0:
S = np.zeros((Nx, Ny), dtype='complex128') # Ensures no division by zero
else:
S = 1j * sin(c1 * F * (-1j * dt)) / F
# Forward FFTs
fft_forward(psi_plus, psi_plus_k)
fft_forward(psi_0, psi_0_k)
fft_forward(psi_minus, psi_minus_k)
# Computing kinetic energy + quadratic Zeeman
psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
# Inverse FFTs
fft_backward(psi_plus_k, psi_plus)
fft_backward(psi_0_k, psi_0)
fft_backward(psi_minus_k, psi_minus)
# Rescaling
psi_plus *= (Nx * Ny)
psi_0 *= (Nx * Ny)
psi_minus *= (Nx * Ny)
# Trap, linear Zeeman & interaction flow
psi_plus = ((C - S * Fz) * psi_plus - 1. / sqrt(2.) * S * conj(F_perp) * psi_0) * exp(-dt * (V - p + c0 * n))
psi_0 = (-1. / sqrt(2.) * S * F_perp * psi_plus + C * psi_0 - 1. / sqrt(2.) * S * conj(F_perp) * psi_minus) \
* exp(-dt * (V + c0 * n))
psi_minus = (-1. / sqrt(2.) * S * F_perp * psi_0 + (C + S * Fz) * psi_minus) * exp(-dt * (V + p + c0 * n))
# Forward FFTs
fft_forward(psi_plus, psi_plus_k)
fft_forward(psi_0, psi_0_k)
fft_forward(psi_minus, psi_minus_k)
# Computing kinetic energy + quadratic Zeeman
psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
# Inverse FFTs
fft_backward(psi_plus_k, psi_plus)
fft_backward(psi_0_k, psi_0)
fft_backward(psi_minus_k, psi_minus)
# Rescaling
psi_plus *= (Nx * Ny)
psi_0 *= (Nx * Ny)
psi_minus *= (Nx * Ny)
# Renormalizing wavefunction
psi_plus *= sqrt(N_plus) / sqrt(dx * dy * np.linalg.norm(psi_plus) ** 2)
psi_0 *= sqrt(N_0) / sqrt(dx * dy * np.linalg.norm(psi_0) ** 2)
psi_minus *= sqrt(N_minus) / sqrt(dx * dy * np.linalg.norm(psi_minus) ** 2)
# Prints current time and saves data to an array
if np.mod(i, Nframe) == 0:
print('it = %1.4f' % t)
psi_plus_save[:, :, k] = psi_plus[:, :]
psi_0_save[:, :, k] = psi_0[:, :]
psi_minus_save[:, :, k] = psi_minus[:, :]
k += 1
t += dt
data.close()
| [((20, 7, 20, 24), 'numpy.meshgrid', 'np.meshgrid', ({(20, 19, 20, 20): 'x', (20, 22, 20, 23): 'y'}, {}), '(x, y)', True, 'import numpy as np\n'), ((22, 7, 22, 59), 'h5py.File', 'h5py.File', ({(22, 17, 22, 53): '"""../data/splitting_dipole_data.hdf5"""', (22, 55, 22, 58): '"""a"""'}, {}), "('../data/splitting_dipole_data.hdf5', 'a')", False, 'import h5py\n'), ((28, 9, 28, 28), 'numpy.meshgrid', 'np.meshgrid', ({(28, 21, 28, 23): 'kx', (28, 25, 28, 27): 'ky'}, {}), '(kx, ky)', True, 'import numpy as np\n'), ((31, 12, 31, 26), 'multiprocessing.cpu_count', 'mp.cpu_count', ({}, {}), '()', True, 'import multiprocessing as mp\n'), ((32, 11, 32, 61), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (), '', False, 'import pyfftw\n'), ((33, 14, 33, 77), 'pyfftw.FFTW', 'pyfftw.FFTW', (), '', False, 'import pyfftw\n'), ((34, 15, 34, 105), 'pyfftw.FFTW', 'pyfftw.FFTW', (), '', False, 'import pyfftw\n'), ((37, 13, 37, 63), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (), '', False, 'import pyfftw\n'), ((38, 10, 38, 60), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (), '', False, 'import pyfftw\n'), ((39, 14, 39, 64), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (), '', False, 'import pyfftw\n'), ((57, 10, 57, 36), 'numpy.empty', 'np.empty', ({(57, 19, 57, 35): '(N_vort, Nx, Ny)'}, {}), '((N_vort, Nx, Ny))', True, 'import numpy as np\n'), ((58, 12, 58, 30), 'numpy.empty', 'np.empty', ({(58, 21, 58, 29): '(Nx, Ny)'}, {}), '((Nx, Ny))', True, 'import numpy as np\n'), ((80, 6, 80, 47), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((84, 29, 84, 77), 'include.helper.rotation', 'helper.rotation', ({(84, 45, 84, 48): 'Psi', (84, 50, 84, 52): 'Nx', (84, 54, 84, 56): 'Ny', (84, 58, 84, 63): 'alpha', (84, 65, 84, 69): 'beta', (84, 71, 84, 76): 'gamma'}, {}), '(Psi, Nx, Ny, alpha, beta, gamma)', False, 'from include import helper\n'), ((87, 0, 87, 27), 'pyfftw.byte_align', 'pyfftw.byte_align', ({(87, 18, 87, 26): 'psi_plus'}, {}), '(psi_plus)', False, 'import pyfftw\n'), ((88, 0, 88, 24), 'pyfftw.byte_align', 'pyfftw.byte_align', ({(88, 18, 88, 23): 'psi_0'}, {}), '(psi_0)', False, 'import pyfftw\n'), ((89, 0, 89, 28), 'pyfftw.byte_align', 'pyfftw.byte_align', ({(89, 18, 89, 27): 'psi_minus'}, {}), '(psi_minus)', False, 'import pyfftw\n'), ((18, 4, 18, 22), 'numpy.arange', 'np.arange', ({(18, 14, 18, 17): '(-Mx)', (18, 19, 18, 21): 'Mx'}, {}), '(-Mx, Mx)', True, 'import numpy as np\n'), ((19, 4, 19, 22), 'numpy.arange', 'np.arange', ({(19, 14, 19, 17): '(-My)', (19, 19, 19, 21): 'My'}, {}), '(-My, My)', True, 'import numpy as np\n'), ((69, 14, 69, 30), 'numpy.arange', 'np.arange', ({(69, 24, 69, 26): '(-5)', (69, 28, 69, 29): '(5)'}, {}), '(-5, 5)', True, 'import numpy as np\n'), ((81, 15, 81, 33), 'numpy.zeros', 'np.zeros', ({(81, 24, 81, 32): '(Nx, Ny)'}, {}), '((Nx, Ny))', True, 'import numpy as np\n'), ((82, 15, 82, 52), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((82, 55, 82, 74), 'numpy.exp', 'exp', ({(82, 59, 82, 73): '(1.0j * theta_tot)'}, {}), '(1.0j * theta_tot)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((83, 15, 83, 33), 'numpy.zeros', 'np.zeros', ({(83, 24, 83, 32): '(Nx, Ny)'}, {}), '((Nx, Ny))', True, 'import numpy as np\n'), ((123, 8, 123, 32), 'numpy.cos', 'cos', ({(123, 12, 123, 31): 'c1 * F * (-1.0j * dt)'}, {}), '(c1 * F * (-1.0j * dt))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((26, 21, 26, 39), 'numpy.arange', 'np.arange', ({(26, 31, 26, 34): '-Mx', (26, 36, 26, 
38): 'Mx'}, {}), '(-Mx, Mx)', True, 'import numpy as np\n'), ((27, 21, 27, 39), 'numpy.arange', 'np.arange', ({(27, 31, 27, 34): '-My', (27, 36, 27, 38): 'My'}, {}), '(-My, My)', True, 'import numpy as np\n'), ((93, 19, 93, 43), 'numpy.linalg.norm', 'np.linalg.norm', ({(93, 34, 93, 42): 'psi_plus'}, {}), '(psi_plus)', True, 'import numpy as np\n'), ((94, 16, 94, 37), 'numpy.linalg.norm', 'np.linalg.norm', ({(94, 31, 94, 36): 'psi_0'}, {}), '(psi_0)', True, 'import numpy as np\n'), ((95, 20, 95, 45), 'numpy.linalg.norm', 'np.linalg.norm', ({(95, 35, 95, 44): 'psi_minus'}, {}), '(psi_minus)', True, 'import numpy as np\n'), ((115, 13, 115, 21), 'numpy.sqrt', 'sqrt', ({(115, 18, 115, 20): '(2.0)'}, {}), '(2.0)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((125, 12, 125, 50), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((135, 18, 135, 63), 'numpy.exp', 'exp', ({(135, 22, 135, 62): '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))'}, {}), '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((136, 15, 136, 52), 'numpy.exp', 'exp', ({(136, 19, 136, 51): '(-0.25 * dt * (Kx ** 2 + Ky ** 2))'}, {}), '(-0.25 * dt * (Kx ** 2 + Ky ** 2))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((137, 19, 137, 64), 'numpy.exp', 'exp', ({(137, 23, 137, 63): '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))'}, {}), '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((150, 86, 150, 113), 'numpy.exp', 'exp', ({(150, 90, 150, 112): '(-dt * (V - p + c0 * n))'}, {}), '(-dt * (V - p + c0 * n))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((152, 14, 152, 37), 'numpy.exp', 'exp', ({(152, 18, 152, 36): '(-dt * (V + c0 * n))'}, {}), '(-dt * (V + c0 * n))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((153, 83, 153, 110), 'numpy.exp', 'exp', ({(153, 87, 153, 109): '(-dt * (V + p + c0 * n))'}, {}), '(-dt * (V + p + c0 * n))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((161, 18, 161, 63), 'numpy.exp', 'exp', ({(161, 22, 161, 62): '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))'}, {}), '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((162, 15, 162, 52), 'numpy.exp', 'exp', ({(162, 19, 162, 51): '(-0.25 * dt * (Kx ** 2 + Ky ** 2))'}, {}), '(-0.25 * dt * (Kx ** 2 + Ky ** 2))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((163, 19, 163, 64), 'numpy.exp', 'exp', ({(163, 23, 163, 63): '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))'}, {}), '(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((176, 16, 176, 28), 'numpy.sqrt', 'sqrt', ({(176, 21, 176, 27): 'N_plus'}, {}), '(N_plus)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((177, 13, 177, 22), 'numpy.sqrt', 'sqrt', ({(177, 18, 177, 21): 'N_0'}, {}), '(N_0)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((178, 17, 178, 30), 'numpy.sqrt', 'sqrt', ({(178, 22, 178, 29): 'N_minus'}, {}), '(N_minus)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((181, 7, 181, 24), 'numpy.mod', 'np.mod', ({(181, 14, 181, 15): 'i', (181, 17, 181, 23): 'Nframe'}, {}), '(i, Nframe)', True, 
'import numpy as np\n'), ((115, 25, 115, 39), 'numpy.conj', 'conj', ({(115, 30, 115, 38): 'psi_plus'}, {}), '(psi_plus)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((115, 50, 115, 61), 'numpy.conj', 'conj', ({(115, 55, 115, 60): 'psi_0'}, {}), '(psi_0)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((127, 17, 127, 41), 'numpy.sin', 'sin', ({(127, 21, 127, 40): '(c1 * F * (-1.0j * dt))'}, {}), '(c1 * F * (-1.0j * dt))', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((73, 36, 73, 52), 'numpy.heaviside', 'heav', ({(73, 41, 73, 47): 'X_plus', (73, 49, 73, 51): '(1.0)'}, {}), '(X_plus, 1.0)', True, 'from numpy import heaviside as heav\n'), ((73, 55, 73, 72), 'numpy.heaviside', 'heav', ({(73, 60, 73, 67): 'X_minus', (73, 69, 73, 71): '(1.0)'}, {}), '(X_minus, 1.0)', True, 'from numpy import heaviside as heav\n'), ((150, 62, 150, 74), 'numpy.conj', 'conj', ({(150, 67, 150, 73): 'F_perp'}, {}), '(F_perp)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((151, 86, 151, 98), 'numpy.conj', 'conj', ({(151, 91, 151, 97): 'F_perp'}, {}), '(F_perp)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((176, 46, 176, 70), 'numpy.linalg.norm', 'np.linalg.norm', ({(176, 61, 176, 69): 'psi_plus'}, {}), '(psi_plus)', True, 'import numpy as np\n'), ((177, 40, 177, 61), 'numpy.linalg.norm', 'np.linalg.norm', ({(177, 55, 177, 60): 'psi_0'}, {}), '(psi_0)', True, 'import numpy as np\n'), ((178, 48, 178, 73), 'numpy.linalg.norm', 'np.linalg.norm', ({(178, 63, 178, 72): 'psi_minus'}, {}), '(psi_minus)', True, 'import numpy as np\n'), ((71, 12, 71, 45), 'numpy.tanh', 'tanh', ({(71, 17, 71, 44): '((Y_minus + 2 * pi * nn) / 2)'}, {}), '((Y_minus + 2 * pi * nn) / 2)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((71, 48, 71, 71), 'numpy.tan', 'tan', ({(71, 52, 71, 70): '((X_minus - pi) / 2)'}, {}), '((X_minus - pi) / 2)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((72, 37, 72, 69), 'numpy.tanh', 'tanh', ({(72, 42, 72, 68): '((Y_plus + 2 * pi * nn) / 2)'}, {}), '((Y_plus + 2 * pi * nn) / 2)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((72, 72, 72, 94), 'numpy.tan', 'tan', ({(72, 76, 72, 93): '((X_plus - pi) / 2)'}, {}), '((X_plus - pi) / 2)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((150, 47, 150, 55), 'numpy.sqrt', 'sqrt', ({(150, 52, 150, 54): '(2.0)'}, {}), '(2.0)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((151, 71, 151, 79), 'numpy.sqrt', 'sqrt', ({(151, 76, 151, 78): '(2.0)'}, {}), '(2.0)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((153, 23, 153, 31), 'numpy.sqrt', 'sqrt', ({(153, 28, 153, 30): '(2.0)'}, {}), '(2.0)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n'), ((151, 19, 151, 27), 'numpy.sqrt', 'sqrt', ({(151, 24, 151, 26): '(2.0)'}, {}), '(2.0)', False, 'from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan\n')] |
cam-laf/vectorcast-execution-plugin | src/main/resources/scripts/crumbDiag.py | fd54e8580886084d040d21fa809be8a609d44d8e | from __future__ import print_function
import requests
import sys
import os
verbose=True
try:
username=os.environ['USERNAME']
password=os.environ['PASSWORD']
except:
print("Crumb Diaganostic requires USERNAME/PASSWORD to be set as environment variables")
sys.exit(-1)
jenkins_url=os.environ['JENKINS_URL']
url = jenkins_url + 'crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)'
print(url)
if username:
crumb = requests.get(url, auth=(username, password))
if crumb.status_code == 200:
crumb_headers = dict()
crumb_headers[crumb.text.split(":")[0]] = crumb.text.split(":")[1]
if verbose:
print("Got crumb: %s" % crumb.text)
else:
print("Failed to get crumb")
print("\nYou may need to enable \"Prevent Cross Site Request Forgery exploits\" from:")
print("Manage Jenkins > Configure Global Security > CSRF Protection and select the appropriate Crumb Algorithm")
print(jenkins_url + "/configureSecurity")
sys.exit(-1)
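# Hedged follow-up sketch (added, not part of the original script): the crumb header built
# above is what later POST requests to Jenkins would attach, e.g. (with '<job-name>' as a
# placeholder):
#   requests.post(jenkins_url + 'job/<job-name>/build',
#                 auth=(username, password), headers=crumb_headers)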
| [((17, 12, 17, 56), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((12, 4, 12, 16), 'sys.exit', 'sys.exit', ({(12, 13, 12, 15): '(-1)'}, {}), '(-1)', False, 'import sys\n'), ((28, 8, 28, 20), 'sys.exit', 'sys.exit', ({(28, 17, 28, 19): '(-1)'}, {}), '(-1)', False, 'import sys\n')] |
vicenteneto/online-judge-solutions | URI/1-Beginner/1099.py | 4176e2387658f083b980d7b49bc98300a4c28411 | # -*- coding: utf-8 -*-
for i in range(int(raw_input())):
x, y = [int(x) for x in raw_input().split()]
if x > y:
x, y = y, x
x += 1 if x % 2 == 0 else 2
print sum([j for j in range(x, y, 2)])
| [] |
MahirGulzar/fpointnet-tiny | mock_file.py | e79406f648573d50fa3988ca987db652ab1286b8 | import tensorflow as tf
FLIPPING_TENSOR = tf.constant([1.0, -1.0, 1.0])
@tf.function
def sample_data(points, labels, num_point):
if tf.random.uniform(shape=()) >= 0.5:
return points * FLIPPING_TENSOR, labels
return points, labels
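# Descriptive note (added): FLIPPING_TENSOR negates only the y coordinate, so roughly half
# of the calls mirror the point cloud about the x-z plane while the labels pass through unchanged.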
mock_data = tf.constant([
[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]
])
mock_labels = tf.constant([
[1.], [0.], [1.]
])
sampling_lambda = lambda x, y: sample_data(x, y, 512)
train_data = tf.data.Dataset.from_tensors((mock_data, mock_labels)) \
.map(sampling_lambda) \
.unbatch() \
.batch(1) \
.repeat(5)
for x, y in train_data:
print(x) | [((3, 18, 3, 47), 'tensorflow.constant', 'tf.constant', ({(3, 30, 3, 46): '[1.0, -1.0, 1.0]'}, {}), '([1.0, -1.0, 1.0])', True, 'import tensorflow as tf\n'), ((13, 12, 17, 2), 'tensorflow.constant', 'tf.constant', ({(13, 24, 17, 1): '[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]'}, {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])', True, 'import tensorflow as tf\n'), ((19, 14, 21, 2), 'tensorflow.constant', 'tf.constant', ({(19, 26, 21, 1): '[[1.0], [0.0], [1.0]]'}, {}), '([[1.0], [0.0], [1.0]])', True, 'import tensorflow as tf\n'), ((7, 7, 7, 34), 'tensorflow.random.uniform', 'tf.random.uniform', (), '', True, 'import tensorflow as tf\n'), ((25, 13, 25, 67), 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', ({(25, 42, 25, 66): '(mock_data, mock_labels)'}, {}), '((mock_data, mock_labels))', True, 'import tensorflow as tf\n')] |
dataholiks/flask_heroku_scheduler | myapp.py | d2b4c2c8fdee066aea729c1566bfbaf52c068557 | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return 'This is the app index page.'
| [((2, 6, 2, 21), 'flask.Flask', 'Flask', ({(2, 12, 2, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask\n')] |
bostud/day_ok | day_ok/schedule/migrations/0027_auto_20210216_1337.py | 2bcee68252b698f5818808d1766fb3ec3f07fce8 | # Generated by Django 3.1.6 on 2021-02-16 11:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schedule', '0026_event'),
]
operations = [
migrations.AlterField(
model_name='group',
name='students',
field=models.ManyToManyField(blank=True, to='schedule.Student', verbose_name='Учні'),
),
migrations.AlterField(
model_name='teacher',
name='subjects',
field=models.ManyToManyField(blank=True, to='schedule.Subject', verbose_name='Предмети'),
),
]
| [((16, 18, 16, 100), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import migrations, models\n'), ((21, 18, 21, 108), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import migrations, models\n')] |
OliverChao/PyWhoAmI | Blog.py | 8742e0a44c4e673d038779b01b14b0cfb7d5395f | import aiohttp
import asyncio
import time
import time
import argparse
import glob
import os
import shutil
import random
import re
import requests
import sys
from concurrent import futures
import pdfkit
import time
from retrying import retry
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.lexers import CppLexer
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
import numbers
if sys.version < '3':
import codecs
from urllib import quote as url_quote
from urllib import getproxies
# Handling Unicode: http://stackoverflow.com/a/6633040/305414
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
from urllib.request import getproxies
from urllib.parse import quote as url_quote
def u(x):
return x
scripFilePath = os.path.split(os.path.realpath(__file__))[0]
PDF_DIR = os.path.join(scripFilePath,'whoamiPDFdir')
CPP_DIR = os.path.join(scripFilePath,'whoamiCPPdir')
class Result(object):
def __init__(self, host, args):
self.args = args
self.host = host
self._search_url = 'https://www.bing.com/search?q=site:{0}%20{1}'
self._USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
# 'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
'Chrome/19.0.1084.46 Safari/536.5'),
('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
'Safari/536.5'), )
self.data = self.whoami()
def __call__(self, *args, **kwargs):
return self.show_results()
def __len__(self):
return len(self.data)
def whoami(self):
self.args['query'] = ' '.join(self.args['query']).replace('?', '')
try:
return self.confirm_links() or 'Sorry, couldn\'t find any help with that topic\n'
except (ConnectionError, SSLError):
return 'Failed to establish network connection\n'
def confirm_links(self):
dic = self._get_dict(self.args['query'])
if not dic:
return False
        '''Skip link validation for now; testing multiple host domains.'''
return dic
# def _is_article(link):
# return re.search('article/details/\d+', link)
# # question_links = [link for link in links if _is_article(link)]
# # https://blog.csdn.net/u013177568/article/details/62432761
# confirm_dict = {k: v for k, v in dic.items() if _is_article(v)}
# return confirm_dict
def _get_dict(self, query):
search_url = self._search_url.format(self.host, url_quote(query))
# search_url : site:blog.csdn.net 1173 HDU
result = self._get_result(search_url)
html = pq(result)
# return the anser_list
return self._extract_links(html, 'bing')
@retry(stop_max_attempt_number=3)
def _get_result(self, url):
try:
return requests.get(url, headers={'User-Agent': random.choice(self._USER_AGENTS)}, ).text
# verify = VERIFY_SSL_CERTIFICATE).text
except requests.exceptions.SSLError as e:
print('[ERROR] Encountered an SSL Error.\n')
print('[*]retrying again automatically ')
raise e
def _extract_links(self, html, search_engine):
if search_engine == 'bing':
return self._extract_dict_from_bing(html)
return None
@staticmethod
def _extract_dict_from_bing(html):
html.remove_namespaces()
dic = {}
for a in html('.b_algo')('h2')('a'):
# name ='[*{0}*] {1}'.format(str(num),a.text)
name = a.text
link = a.attrib['href']
dic[name] = str(link)
# num+=1
return dic
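    # Illustrative shape of the mapping returned above (hypothetical values):
    #   {"Some article title": "https://blog.csdn.net/<user>/article/details/<id>", ...}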
def show_results(self):
if isinstance(self.data,str):
print('[!!] ',self.data)
return
num = 0
for k, v in self.data.items():
print('[*{}*] '.format(str(num)), end='')
print(k, end=' [*link*] ')
print(v)
num += 1
class Blog(Result):
def __init__(self, host, args):
super().__init__(host, args)
self.links = list(self.data.values())
def show_code(self):
url = list(self.data.values())[self.args['print']]
main_page = self._parse_url(url)
s = self._get_code(main_page, self.args) or 'sorry,this article has no code...'
print(s)
def save_to_pdf(self, url):
html_template = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
<!-- <center><h1>{title}</h1></center> -->
{content}
</body>
</html>
"""
options = {
'page-size': 'Letter',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8",
'custom-header': [
('Accept-Encoding', 'gzip')
],
'cookie': [
('cookie-name1', 'cookie-value1'),
('cookie-name2', 'cookie-value2'),
],
'outline-depth': 10,
}
main_page = self._parse_url(url)
title = main_page('h1').eq(0).text()
title = re.sub('[<>\?\\\/:\*\s\[\]\(\)\-]', '.', title)
html = html_template.format(title='Oliver loves Annabelle forever~', content=main_page.html())
if not os.path.exists(PDF_DIR):
os.makedirs(PDF_DIR)
filePath = os.path.join(PDF_DIR, title + '.pdf')
if self._test_is_open_if_exists(filePath):
return
try:
print('[*] save to ', filePath)
self._save_to_pdf(html,filePath)
print('[*] successfully ')
except:
            print('[!!] The page being saved may contain conflicting markup')
            print('[Note] Pages documenting HTML or similar markup are more likely to conflict')
            print('[!!]save failed')
            print('[!!] If the failure was caused by image paths, the text and code parts are still rendered to PDF normally')
try:
            # system-level commands apparently cannot be caught by this try block...
self.open_after_save(filePath)
except:
            print('[!!] The file was not opened; an IO error may have occurred while saving')
            print('[!!] Please regenerate the PDF, or the page structure may not be suitable for PDF generation')
            print('[~~] Sorry for the inconvenience...')
@staticmethod
def _save_to_pdf(html, filepath):
wkhtmltopdf_path = scripFilePath + '/wkhtmltox/bin/wkhtmltopdf.exe'
config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_path)
pdfkit.from_string(html, filepath, configuration=config)
def open_after_save(self, pdf_path):
if not self.args['open_pdf']:
return
try:
if len(self.args['save']):
return False
except TypeError as e:
pass
# if args['pdf'] and PDFpath.split('.')[-1]!='pdf':
# PDFpath += '.pdf'
os.popen(pdf_path)
def _test_is_open_if_exists(self, file_path):
try:
if len(self.args['save']):
return False
except TypeError as e:
pass
if self.args['open_pdf']:
if os.path.exists(file_path):
                print('File already exists; opening it directly')
os.popen(file_path)
return True
else:
return False
def _parse_url(self, url):
'''
        :param url: page URL
        :return: PyQuery object wrapping the main content area of the page
'''
page = self._get_result(url)
html = pq(page)
# the main part of the article
return html('.blog-content-box')
def _get_code(self, main_page, args):
'''
:param main_page:main_page=_parse_url(url)
:param args: args
:return: str
'''
html = main_page('article')('pre')('code') or main_page('article')('pre')
if not html:
return None
ans = []
ans_split = '\n' + '<==>' * 17 + '\n'
if args['all_code']:
for node in html:
node = pq(node)
s = node.html()
# s=re.sub('</?[^>]+>','',s)
s = re.sub('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)
                s = s.replace('&gt;', '>').replace('&lt;', '<')
ans.append(self._add_color(s, args))
else:
node = pq(html[-1])
s = node.html()
s = re.sub('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)
            s = s.replace('&gt;', '>').replace('&lt;', '<')
ans.append(self._add_color(s, args))
return ans_split.join(ans)
@staticmethod
def _add_color(code, args):
if not args['color']:
return code
lexer = None
try:
lexer = guess_lexer(code)
except ClassNotFound:
return code
return highlight(code, CppLexer(), TerminalFormatter(bg='dark'))
def save_to_cpp(self):
ans_split = '\n' + '<==>' * 17 + '\n'
url = self.links[self.args['number_link']]
main_page = self._parse_url(url)
title = main_page('h1').eq(0).text()
title = re.sub('[<>\?\\\/:\*\s]', '.', title)
s = self._get_code(main_page, self.args)
if not s:
print('sorry , this article has no code...')
print('please try another...')
return
if not os.path.exists(CPP_DIR):
os.makedirs(CPP_DIR)
filePath = os.path.join(CPP_DIR, title + '.cpp')
if self._test_is_open_if_exists(filePath):
return
code = s.split(ans_split)[-1]
with open(filePath, 'w')as f:
f.write(code)
print('[*]save successfully...')
try:
self.open_after_save(filePath)
except:
            print('[!!] The file was not opened; an IO error may have occurred while saving')
print('[!!]open failed') | [((44, 10, 44, 52), 'os.path.join', 'os.path.join', ({(44, 23, 44, 36): 'scripFilePath', (44, 37, 44, 51): '"""whoamiPDFdir"""'}, {}), "(scripFilePath, 'whoamiPDFdir')", False, 'import os\n'), ((45, 10, 45, 52), 'os.path.join', 'os.path.join', ({(45, 23, 45, 36): 'scripFilePath', (45, 37, 45, 51): '"""whoamiCPPdir"""'}, {}), "(scripFilePath, 'whoamiCPPdir')", False, 'import os\n'), ((97, 5, 97, 37), 'retrying.retry', 'retry', (), '', False, 'from retrying import retry\n'), ((43, 30, 43, 56), 'os.path.realpath', 'os.path.realpath', ({(43, 47, 43, 55): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((93, 15, 93, 25), 'pyquery.PyQuery', 'pq', ({(93, 18, 93, 24): 'result'}, {}), '(result)', True, 'from pyquery import PyQuery as pq\n'), ((181, 16, 181, 63), 're.sub', 're.sub', ({(181, 23, 181, 50): '"""[<>\\\\?\\\\\\\\/:\\\\*\\\\s\\\\[\\\\]\\\\(\\\\)\\\\-]"""', (181, 52, 181, 55): '"""."""', (181, 57, 181, 62): 'title'}, {}), "('[<>\\\\?\\\\\\\\/:\\\\*\\\\s\\\\[\\\\]\\\\(\\\\)\\\\-]', '.', title)", False, 'import re\n'), ((185, 19, 185, 56), 'os.path.join', 'os.path.join', ({(185, 32, 185, 39): 'PDF_DIR', (185, 41, 185, 55): "title + '.pdf'"}, {}), "(PDF_DIR, title + '.pdf')", False, 'import os\n'), ((210, 17, 210, 67), 'pdfkit.configuration', 'pdfkit.configuration', (), '', False, 'import pdfkit\n'), ((211, 8, 211, 64), 'pdfkit.from_string', 'pdfkit.from_string', (), '', False, 'import pdfkit\n'), ((224, 8, 224, 26), 'os.popen', 'os.popen', ({(224, 17, 224, 25): 'pdf_path'}, {}), '(pdf_path)', False, 'import os\n'), ((248, 15, 248, 23), 'pyquery.PyQuery', 'pq', ({(248, 18, 248, 22): 'page'}, {}), '(page)', True, 'from pyquery import PyQuery as pq\n'), ((295, 16, 295, 53), 're.sub', 're.sub', ({(295, 23, 295, 40): '"""[<>\\\\?\\\\\\\\/:\\\\*\\\\s]"""', (295, 42, 295, 45): '"""."""', (295, 47, 295, 52): 'title'}, {}), "('[<>\\\\?\\\\\\\\/:\\\\*\\\\s]', '.', title)", False, 'import re\n'), ((303, 19, 303, 56), 'os.path.join', 'os.path.join', ({(303, 32, 303, 39): 'CPP_DIR', (303, 41, 303, 55): "title + '.cpp'"}, {}), "(CPP_DIR, title + '.cpp')", False, 'import os\n'), ((36, 15, 36, 46), 'codecs.unicode_escape_decode', 'codecs.unicode_escape_decode', ({(36, 44, 36, 45): 'x'}, {}), '(x)', False, 'import codecs\n'), ((90, 56, 90, 72), 'urllib.parse.quote', 'url_quote', ({(90, 66, 90, 71): 'query'}, {}), '(query)', True, 'from urllib.parse import quote as url_quote\n'), ((183, 15, 183, 38), 'os.path.exists', 'os.path.exists', ({(183, 30, 183, 37): 'PDF_DIR'}, {}), '(PDF_DIR)', False, 'import os\n'), ((184, 12, 184, 32), 'os.makedirs', 'os.makedirs', ({(184, 24, 184, 31): 'PDF_DIR'}, {}), '(PDF_DIR)', False, 'import os\n'), ((234, 15, 234, 40), 'os.path.exists', 'os.path.exists', ({(234, 30, 234, 39): 'file_path'}, {}), '(file_path)', False, 'import os\n'), ((272, 19, 272, 31), 'pyquery.PyQuery', 'pq', ({(272, 22, 272, 30): 'html[-1]'}, {}), '(html[-1])', True, 'from pyquery import PyQuery as pq\n'), ((274, 16, 274, 72), 're.sub', 're.sub', ({(274, 23, 274, 64): '"""<((span)|(code)|(/span)|(/code)){1}.*?>"""', (274, 66, 274, 68): '""""""', (274, 70, 274, 71): 's'}, {}), "('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)", False, 'import re\n'), ((285, 20, 285, 37), 'pygments.lexers.guess_lexer', 'guess_lexer', ({(285, 32, 285, 36): 'code'}, {}), '(code)', False, 'from pygments.lexers import guess_lexer, get_lexer_by_name\n'), ((288, 31, 288, 41), 'pygments.lexers.CppLexer', 'CppLexer', ({}, {}), '()', False, 'from pygments.lexers import CppLexer\n'), ((288, 43, 288, 
71), 'pygments.formatters.terminal.TerminalFormatter', 'TerminalFormatter', (), '', False, 'from pygments.formatters.terminal import TerminalFormatter\n'), ((301, 15, 301, 38), 'os.path.exists', 'os.path.exists', ({(301, 30, 301, 37): 'CPP_DIR'}, {}), '(CPP_DIR)', False, 'import os\n'), ((302, 12, 302, 32), 'os.makedirs', 'os.makedirs', ({(302, 24, 302, 31): 'CPP_DIR'}, {}), '(CPP_DIR)', False, 'import os\n'), ((236, 16, 236, 35), 'os.popen', 'os.popen', ({(236, 25, 236, 34): 'file_path'}, {}), '(file_path)', False, 'import os\n'), ((265, 23, 265, 31), 'pyquery.PyQuery', 'pq', ({(265, 26, 265, 30): 'node'}, {}), '(node)', True, 'from pyquery import PyQuery as pq\n'), ((268, 20, 268, 76), 're.sub', 're.sub', ({(268, 27, 268, 68): '"""<((span)|(code)|(/span)|(/code)){1}.*?>"""', (268, 70, 268, 72): '""""""', (268, 74, 268, 75): 's'}, {}), "('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)", False, 'import re\n'), ((100, 60, 100, 92), 'random.choice', 'random.choice', ({(100, 74, 100, 91): 'self._USER_AGENTS'}, {}), '(self._USER_AGENTS)', False, 'import random\n')] |
dslowikowski/commcare-hq | corehq/apps/app_manager/tests/test_xml_parsing.py | ad8885cf8dab69dc85cb64f37aeaf06106124797 | from django.test import SimpleTestCase as TestCase
from corehq.apps.app_manager.models import _parse_xml
import os
class XMLParsingTest(TestCase):
def testUnicodeError(self):
"""Tests a bug found in Unicode processing of a form"""
file_path = os.path.join(os.path.dirname(__file__), "data", "unicode_error_form.xhtml")
with open(file_path, "rb") as f:
xml_data = f.read()
try:
_parse_xml(xml_data) # this should not raise an error
except:
self.fail("Parsing normal string data shouldn't fail!")
try:
_parse_xml(unicode(xml_data))
except:
self.fail("Parsing unicode data shouldn't fail!")
| [((9, 33, 9, 58), 'os.path.dirname', 'os.path.dirname', ({(9, 49, 9, 57): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((13, 12, 13, 32), 'corehq.apps.app_manager.models._parse_xml', '_parse_xml', ({(13, 23, 13, 31): 'xml_data'}, {}), '(xml_data)', False, 'from corehq.apps.app_manager.models import _parse_xml\n')] |
NeonOcean/Environment | S4/S4 Library/generated/protocolbuffers/Localization_pb2.py | ca658cf66e8fd6866c22a4a0136d415705b36d26 | import protocolbuffers.Consts_pb2 as Consts_pb2
from google.protobuf import descriptor, message, reflection
DESCRIPTOR = descriptor.FileDescriptor(name = 'Localization.proto', package = 'EA.Sims4.Network',
serialized_pb = '\n\x12Localization.proto\x12\x10EA.Sims4.Network\x1a\x0cConsts.proto"\x85\n\n\x14LocalizedStringToken\x12G\n\x04type\x18\x01 \x02(\x0e20.EA.Sims4.Network.LocalizedStringToken.TokenType:\x07INVALID\x126\n\x08rdl_type\x18\x02 \x01(\x0e2$.EA.Sims4.Network.SocialRichDataType\x12\x12\n\nfirst_name\x18\x03 \x01(\t\x12\x11\n\tlast_name\x18\x04 \x01(\t\x12\x15\n\rfull_name_key\x18\x05 \x01(\r\x12\x11\n\tis_female\x18\x06 \x01(\x08\x12\x0e\n\x06sim_id\x18\x07 \x01(\x04\x126\n\x0btext_string\x18\x08 \x01(\x0b2!.EA.Sims4.Network.LocalizedString\x12\x0e\n\x06number\x18\t \x01(\x02\x12\x12\n\npersona_id\x18\n \x01(\x04\x12\x12\n\naccount_id\x18\x0b \x01(\x04\x12\x16\n\x0epersona_string\x18\x0c \x01(\t\x12\x0f\n\x07zone_id\x18\r \x01(\x04\x12\x10\n\x08world_id\x18\x0e \x01(\r\x12\x11\n\tzone_name\x18\x0f \x01(\t\x12\x10\n\x08event_id\x18\x10 \x01(\x04\x12\x17\n\x0fevent_type_hash\x18\x11 \x01(\r\x12\x17\n\x0fskill_name_hash\x18\x12 \x01(\r\x12\x13\n\x0bskill_level\x18\x13 \x01(\r\x12\x12\n\nskill_guid\x18\x14 \x01(\x04\x12\x17\n\x0ftrait_name_hash\x18\x15 \x01(\r\x12\x12\n\ntrait_guid\x18\x16 \x01(\x04\x12\x15\n\rbit_name_hash\x18\x17 \x01(\r\x12\x10\n\x08bit_guid\x18\x18 \x01(\x04\x12\x18\n\x10catalog_name_key\x18\x19 \x01(\r\x12\x1f\n\x17catalog_description_key\x18\x1a \x01(\r\x12\x13\n\x0bcustom_name\x18\x1b \x01(\t\x12\x1a\n\x12custom_description\x18\x1c \x01(\t\x12\x12\n\ncareer_uid\x18\x1d \x01(\x04\x12\x11\n\tmemory_id\x18\x1e \x01(\x04\x12\x1a\n\x12memory_string_hash\x18\x1f \x01(\r\x12\x10\n\x08raw_text\x18 \x01(\t\x12A\n\rdate_and_time\x18! \x01(\x0b2*.EA.Sims4.Network.LocalizedDateAndTimeData\x12E\n\x08sim_list\x18" \x03(\x0b23.EA.Sims4.Network.LocalizedStringToken.SubTokenData\x1a¨\x01\n\x0cSubTokenData\x12G\n\x04type\x18\x01 \x02(\x0e20.EA.Sims4.Network.LocalizedStringToken.TokenType:\x07INVALID\x12\x12\n\nfirst_name\x18\x02 \x01(\t\x12\x11\n\tlast_name\x18\x03 \x01(\t\x12\x15\n\rfull_name_key\x18\x04 \x01(\r\x12\x11\n\tis_female\x18\x05 \x01(\x08"\x93\x01\n\tTokenType\x12\x0b\n\x07INVALID\x10\x00\x12\x07\n\x03SIM\x10\x01\x12\n\n\x06STRING\x10\x02\x12\x0c\n\x08RAW_TEXT\x10\x03\x12\n\n\x06NUMBER\x10\x04\x12\n\n\x06OBJECT\x10\x05\x12\x11\n\rDATE_AND_TIME\x10\x06\x12\x0c\n\x08RICHDATA\x10\x07\x12\x0f\n\x0bSTRING_LIST\x10\x08\x12\x0c\n\x08SIM_LIST\x10\t"\x9e\x01\n\x18LocalizedDateAndTimeData\x12\x0f\n\x07seconds\x18\x01 \x01(\r\x12\x0f\n\x07minutes\x18\x02 \x01(\r\x12\r\n\x05hours\x18\x03 \x01(\r\x12\x0c\n\x04date\x18\x04 \x01(\r\x12\r\n\x05month\x18\x05 \x01(\r\x12\x11\n\tfull_year\x18\x06 \x01(\r\x12!\n\x19date_and_time_format_hash\x18\x07 \x01(\r"W\n\x0fLocalizedString\x12\x0c\n\x04hash\x18\x01 \x02(\r\x126\n\x06tokens\x18\x02 \x03(\x0b2&.EA.Sims4.Network.LocalizedStringToken"W\n\x17LocalizedStringValidate\x12<\n\x11localized_strings\x18\x01 \x03(\x0b2!.EA.Sims4.Network.LocalizedString')
_LOCALIZEDSTRINGTOKEN_TOKENTYPE = descriptor.EnumDescriptor(name = 'TokenType', full_name = 'EA.Sims4.Network.LocalizedStringToken.TokenType', filename = None, file = DESCRIPTOR,
values = [
descriptor.EnumValueDescriptor(name = 'INVALID', index = 0, number = 0, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'SIM', index = 1, number = 1, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'STRING', index = 2, number = 2, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'RAW_TEXT', index = 3, number = 3, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'NUMBER', index = 4, number = 4, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'OBJECT', index = 5, number = 5, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'DATE_AND_TIME', index = 6, number = 6, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'RICHDATA', index = 7, number = 7, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'STRING_LIST', index = 8, number = 8, options = None, type = None),
descriptor.EnumValueDescriptor(name = 'SIM_LIST', index = 9, number = 9, options = None, type = None)], containing_type = None, options = None, serialized_start = 1193, serialized_end = 1340)
_LOCALIZEDSTRINGTOKEN_SUBTOKENDATA = descriptor.Descriptor(name = 'SubTokenData', full_name = 'EA.Sims4.Network.LocalizedStringToken.SubTokenData', filename = None, file = DESCRIPTOR, containing_type = None, fields = [
descriptor.FieldDescriptor(name = 'type', full_name = 'EA.Sims4.Network.LocalizedStringToken.SubTokenData.type', index = 0, number = 1, type = 14, cpp_type = 8, label = 2, has_default_value = True, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'first_name', full_name = 'EA.Sims4.Network.LocalizedStringToken.SubTokenData.first_name', index = 1, number = 2, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'last_name', full_name = 'EA.Sims4.Network.LocalizedStringToken.SubTokenData.last_name', index = 2, number = 3, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'full_name_key', full_name = 'EA.Sims4.Network.LocalizedStringToken.SubTokenData.full_name_key', index = 3, number = 4, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'is_female', full_name = 'EA.Sims4.Network.LocalizedStringToken.SubTokenData.is_female', index = 4, number = 5, type = 8, cpp_type = 7, label = 1, has_default_value = False, default_value = False, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None)], extensions = [], nested_types = [], enum_types = [], options = None, is_extendable = False, extension_ranges = [], serialized_start = 1022, serialized_end = 1190)
_LOCALIZEDSTRINGTOKEN = descriptor.Descriptor(
name = 'LocalizedStringToken',
full_name = 'EA.Sims4.Network.LocalizedStringToken',
filename = None,
file = DESCRIPTOR,
containing_type = None,
fields = [
descriptor.FieldDescriptor(name = 'type', full_name = 'EA.Sims4.Network.LocalizedStringToken.type', index = 0, number = 1, type = 14, cpp_type = 8, label = 2, has_default_value = True, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'rdl_type', full_name = 'EA.Sims4.Network.LocalizedStringToken.rdl_type', index = 1, number = 2, type = 14, cpp_type = 8, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'first_name', full_name = 'EA.Sims4.Network.LocalizedStringToken.first_name', index = 2, number = 3, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'last_name', full_name = 'EA.Sims4.Network.LocalizedStringToken.last_name', index = 3, number = 4, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'full_name_key', full_name = 'EA.Sims4.Network.LocalizedStringToken.full_name_key', index = 4, number = 5, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'is_female', full_name = 'EA.Sims4.Network.LocalizedStringToken.is_female', index = 5, number = 6, type = 8, cpp_type = 7, label = 1, has_default_value = False, default_value = False, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'sim_id', full_name = 'EA.Sims4.Network.LocalizedStringToken.sim_id', index = 6, number = 7, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'text_string', full_name = 'EA.Sims4.Network.LocalizedStringToken.text_string', index = 7, number = 8, type = 11, cpp_type = 10, label = 1, has_default_value = False, default_value = None, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'number', full_name = 'EA.Sims4.Network.LocalizedStringToken.number', index = 8, number = 9, type = 2, cpp_type = 6, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'persona_id', full_name = 'EA.Sims4.Network.LocalizedStringToken.persona_id', index = 9, number = 10, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'account_id', full_name = 'EA.Sims4.Network.LocalizedStringToken.account_id', index = 10, number = 11, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'persona_string', full_name = 'EA.Sims4.Network.LocalizedStringToken.persona_string', index = 11, number = 12, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'zone_id', full_name = 'EA.Sims4.Network.LocalizedStringToken.zone_id', index = 12, number = 13, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'world_id', full_name = 'EA.Sims4.Network.LocalizedStringToken.world_id', index = 13, number = 14, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'zone_name', full_name = 'EA.Sims4.Network.LocalizedStringToken.zone_name', index = 14, number = 15, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'event_id', full_name = 'EA.Sims4.Network.LocalizedStringToken.event_id', index = 15, number = 16, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'event_type_hash', full_name = 'EA.Sims4.Network.LocalizedStringToken.event_type_hash', index = 16, number = 17, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'skill_name_hash', full_name = 'EA.Sims4.Network.LocalizedStringToken.skill_name_hash', index = 17, number = 18, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'skill_level', full_name = 'EA.Sims4.Network.LocalizedStringToken.skill_level', index = 18, number = 19, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'skill_guid', full_name = 'EA.Sims4.Network.LocalizedStringToken.skill_guid', index = 19, number = 20, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'trait_name_hash', full_name = 'EA.Sims4.Network.LocalizedStringToken.trait_name_hash', index = 20, number = 21, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'trait_guid', full_name = 'EA.Sims4.Network.LocalizedStringToken.trait_guid', index = 21, number = 22, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'bit_name_hash', full_name = 'EA.Sims4.Network.LocalizedStringToken.bit_name_hash', index = 22, number = 23, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'bit_guid', full_name = 'EA.Sims4.Network.LocalizedStringToken.bit_guid', index = 23, number = 24, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'catalog_name_key', full_name = 'EA.Sims4.Network.LocalizedStringToken.catalog_name_key', index = 24, number = 25, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'catalog_description_key', full_name = 'EA.Sims4.Network.LocalizedStringToken.catalog_description_key', index = 25, number = 26, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'custom_name', full_name = 'EA.Sims4.Network.LocalizedStringToken.custom_name', index = 26, number = 27, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'custom_description', full_name = 'EA.Sims4.Network.LocalizedStringToken.custom_description', index = 27, number = 28, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'career_uid', full_name = 'EA.Sims4.Network.LocalizedStringToken.career_uid', index = 28, number = 29, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'memory_id', full_name = 'EA.Sims4.Network.LocalizedStringToken.memory_id', index = 29, number = 30, type = 4, cpp_type = 4, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'memory_string_hash', full_name = 'EA.Sims4.Network.LocalizedStringToken.memory_string_hash', index = 30, number = 31, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'raw_text', full_name = 'EA.Sims4.Network.LocalizedStringToken.raw_text', index = 31, number = 32, type = 9, cpp_type = 9, label = 1, has_default_value = False, default_value = b''.decode('utf-8'), message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'date_and_time', full_name = 'EA.Sims4.Network.LocalizedStringToken.date_and_time', index = 32, number = 33, type = 11, cpp_type = 10, label = 1, has_default_value = False, default_value = None, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'sim_list', full_name = 'EA.Sims4.Network.LocalizedStringToken.sim_list', index = 33, number = 34, type = 11, cpp_type = 10, label = 3, has_default_value = False, default_value = [], message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None)],
extensions = [],
nested_types = [_LOCALIZEDSTRINGTOKEN_SUBTOKENDATA],
enum_types = [_LOCALIZEDSTRINGTOKEN_TOKENTYPE],
options = None,
is_extendable = False,
extension_ranges = [],
serialized_start = 55,
serialized_end = 1340
)
_LOCALIZEDDATEANDTIMEDATA = descriptor.Descriptor(name = 'LocalizedDateAndTimeData', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData', filename = None, file = DESCRIPTOR, containing_type = None, fields = [
descriptor.FieldDescriptor(name = 'seconds', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData.seconds', index = 0, number = 1, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'minutes', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData.minutes', index = 1, number = 2, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'hours', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData.hours', index = 2, number = 3, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'date', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData.date', index = 3, number = 4, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'month', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData.month', index = 4, number = 5, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'full_year', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData.full_year', index = 5, number = 6, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'date_and_time_format_hash', full_name = 'EA.Sims4.Network.LocalizedDateAndTimeData.date_and_time_format_hash', index = 6, number = 7, type = 13, cpp_type = 3, label = 1, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None)], extensions = [], nested_types = [], enum_types = [], options = None, is_extendable = False, extension_ranges = [], serialized_start = 1343, serialized_end = 1501)
_LOCALIZEDSTRING = descriptor.Descriptor(name = 'LocalizedString', full_name = 'EA.Sims4.Network.LocalizedString', filename = None, file = DESCRIPTOR, containing_type = None, fields = [
descriptor.FieldDescriptor(name = 'hash', full_name = 'EA.Sims4.Network.LocalizedString.hash', index = 0, number = 1, type = 13, cpp_type = 3, label = 2, has_default_value = False, default_value = 0, message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None),
descriptor.FieldDescriptor(name = 'tokens', full_name = 'EA.Sims4.Network.LocalizedString.tokens', index = 1, number = 2, type = 11, cpp_type = 10, label = 3, has_default_value = False, default_value = [], message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None)], extensions = [], nested_types = [], enum_types = [], options = None, is_extendable = False, extension_ranges = [], serialized_start = 1503,
serialized_end = 1590)
_LOCALIZEDSTRINGVALIDATE = descriptor.Descriptor(name = 'LocalizedStringValidate', full_name = 'EA.Sims4.Network.LocalizedStringValidate', filename = None, file = DESCRIPTOR, containing_type = None, fields = [descriptor.FieldDescriptor(name = 'localized_strings', full_name = 'EA.Sims4.Network.LocalizedStringValidate.localized_strings', index = 0, number = 1, type = 11, cpp_type = 10, label = 3, has_default_value = False, default_value = [], message_type = None, enum_type = None, containing_type = None, is_extension = False, extension_scope = None, options = None)], extensions = [], nested_types = [], enum_types = [], options = None, is_extendable = False, extension_ranges = [], serialized_start = 1592, serialized_end = 1679)
_LOCALIZEDSTRINGTOKEN_SUBTOKENDATA.fields_by_name['type'].enum_type = _LOCALIZEDSTRINGTOKEN_TOKENTYPE
_LOCALIZEDSTRINGTOKEN_SUBTOKENDATA.containing_type = _LOCALIZEDSTRINGTOKEN
_LOCALIZEDSTRINGTOKEN.fields_by_name['type'].enum_type = _LOCALIZEDSTRINGTOKEN_TOKENTYPE
_LOCALIZEDSTRINGTOKEN.fields_by_name['rdl_type'].enum_type = Consts_pb2._SOCIALRICHDATATYPE
_LOCALIZEDSTRINGTOKEN.fields_by_name['text_string'].message_type = _LOCALIZEDSTRING
_LOCALIZEDSTRINGTOKEN.fields_by_name['date_and_time'].message_type = _LOCALIZEDDATEANDTIMEDATA
_LOCALIZEDSTRINGTOKEN.fields_by_name['sim_list'].message_type = _LOCALIZEDSTRINGTOKEN_SUBTOKENDATA
_LOCALIZEDSTRINGTOKEN_TOKENTYPE.containing_type = _LOCALIZEDSTRINGTOKEN
_LOCALIZEDSTRING.fields_by_name['tokens'].message_type = _LOCALIZEDSTRINGTOKEN
_LOCALIZEDSTRINGVALIDATE.fields_by_name['localized_strings'].message_type = _LOCALIZEDSTRING
DESCRIPTOR.message_types_by_name['LocalizedStringToken'] = _LOCALIZEDSTRINGTOKEN
DESCRIPTOR.message_types_by_name['LocalizedDateAndTimeData'] = _LOCALIZEDDATEANDTIMEDATA
DESCRIPTOR.message_types_by_name['LocalizedString'] = _LOCALIZEDSTRING
DESCRIPTOR.message_types_by_name['LocalizedStringValidate'] = _LOCALIZEDSTRINGVALIDATE
class LocalizedStringToken(message.Message, metaclass = reflection.GeneratedProtocolMessageType):
class SubTokenData(message.Message, metaclass = reflection.GeneratedProtocolMessageType):
DESCRIPTOR = _LOCALIZEDSTRINGTOKEN_SUBTOKENDATA
DESCRIPTOR = _LOCALIZEDSTRINGTOKEN
class LocalizedDateAndTimeData(message.Message, metaclass = reflection.GeneratedProtocolMessageType):
DESCRIPTOR = _LOCALIZEDDATEANDTIMEDATA
class LocalizedString(message.Message, metaclass = reflection.GeneratedProtocolMessageType):
DESCRIPTOR = _LOCALIZEDSTRING
class LocalizedStringValidate(message.Message, metaclass = reflection.GeneratedProtocolMessageType):
DESCRIPTOR = _LOCALIZEDSTRINGVALIDATE
| [((4, 13, 5, 2867), 'google.protobuf.descriptor.FileDescriptor', 'descriptor.FileDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((8, 16, 8, 116), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((9, 16, 9, 112), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((10, 16, 10, 115), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((11, 16, 11, 117), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((12, 16, 12, 115), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((13, 16, 13, 115), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((14, 16, 14, 122), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((15, 16, 15, 117), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((16, 16, 16, 120), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((17, 16, 17, 117), 'google.protobuf.descriptor.EnumValueDescriptor', 'descriptor.EnumValueDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((19, 1, 19, 342), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((22, 1, 22, 361), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((23, 1, 23, 356), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((32, 2, 32, 330), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((33, 2, 33, 339), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((36, 2, 36, 349), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((37, 2, 37, 344), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((38, 2, 38, 334), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((39, 2, 39, 349), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from 
google.protobuf import descriptor, message, reflection\n'), ((40, 2, 40, 334), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((41, 2, 41, 343), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((42, 2, 42, 344), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((44, 2, 44, 338), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((45, 2, 45, 341), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((47, 2, 47, 340), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((48, 2, 48, 355), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((49, 2, 49, 355), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((50, 2, 50, 347), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((51, 2, 51, 344), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((52, 2, 52, 355), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((53, 2, 53, 344), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((54, 2, 54, 351), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((55, 2, 55, 340), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((56, 2, 56, 357), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((57, 2, 57, 371), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((60, 2, 60, 344), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((61, 2, 61, 342), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((62, 2, 62, 361), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((64, 2, 64, 355), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, 
message, reflection\n'), ((65, 2, 65, 343), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((76, 1, 76, 340), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((77, 1, 77, 340), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((78, 1, 78, 336), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((79, 1, 79, 334), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((80, 1, 80, 336), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((81, 1, 81, 344), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((82, 1, 82, 376), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((84, 1, 84, 325), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((85, 1, 85, 331), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n'), ((87, 209, 87, 569), 'google.protobuf.descriptor.FieldDescriptor', 'descriptor.FieldDescriptor', (), '', False, 'from google.protobuf import descriptor, message, reflection\n')] |
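A minimal, hedged sketch of using the generated protobuf classes above — the module name Localization_pb2 and every field value are assumptions of mine, not taken from the dump:

# Hypothetical usage sketch; assumes the generated module is importable and the protobuf runtime is installed.
from Localization_pb2 import LocalizedString, LocalizedStringToken  # module name is an assumption

msg = LocalizedString()
msg.hash = 0x1234ABCD                        # 'hash' is the required scalar field
token = msg.tokens.add()                     # append one LocalizedStringToken to the repeated field
token.type = LocalizedStringToken.RAW_TEXT   # TokenType enum value defined above
token.raw_text = 'Hello'
payload = msg.SerializeToString()            # proto2 wire-format bytes

roundtrip = LocalizedString()
roundtrip.ParseFromString(payload)
assert roundtrip.tokens[0].raw_text == 'Hello'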
fumiyanll23/algo-method | dynamic_programming/01/01-06.py | d86ea1d399cbc5a1db0ae49d0c82e41042f661ab | # input
N, M = map(int, input().split())
Ds = [*map(int, input().split())]
# compute
# dp[ni] is True when the total ni can be formed as a sum of values from Ds
# (each value may be used any number of times)
dp = [False] * (N+1)
for ni in range(N+1):
    if ni == 0:
        dp[ni] = True
    for D in Ds:
        if ni >= D:
            dp[ni] = dp[ni] or dp[ni-D]
# output
print("Yes" if dp[-1] else "No")
| [] |
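The row above is a standard reachability DP (can N be written as a sum of the given D values, with reuse allowed). A small self-contained check of the same recurrence, with inputs made up for illustration:

# Hedged sketch: same recurrence wrapped in a function so it can be checked without stdin.
def reachable(N, Ds):
    dp = [False] * (N + 1)
    dp[0] = True
    for ni in range(1, N + 1):
        dp[ni] = any(ni >= D and dp[ni - D] for D in Ds)
    return dp[N]

assert reachable(7, [2, 5]) is True    # 7 = 2 + 5
assert reachable(3, [2, 4]) is False   # an odd total cannot be built from even steps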
ArshSood/sorting | swapsort.py | 97e1188ad626420e8ffeab992f7e98a2a91ae4b1 | # sorting
n=int(input())
array=list(map(int,input().split()))
i=0
count=[]        # flattened index pairs (i, min_idx) recorded for each swap
counter=0       # number of swaps performed
while i<len(array):
    min_idx=i   # index of the smallest element in array[i:]
    start=i+1
    while(start<len(array)):
        if array[start]<array[min_idx]:
            min_idx=start
        start+=1
    if i!=min_idx:
        array[i],array[min_idx]=array[min_idx],array[i]
        count.append(i)
        count.append(min_idx)
        counter+=1
    i+=1
print(counter)
for i in range(0,len(count)):
    print(count[i],end=" ")
| [] |
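The same swap-recording selection sort from the row above, rewrapped as a function so its output can be checked without stdin — the function name and sample input are mine:

def selection_sort_with_swaps(values):
    """Sort a copy of `values` and return (swap_count, flattened index pairs)."""
    arr = list(values)
    swaps = []
    for i in range(len(arr)):
        min_idx = i
        for j in range(i + 1, len(arr)):
            if arr[j] < arr[min_idx]:
                min_idx = j
        if i != min_idx:
            arr[i], arr[min_idx] = arr[min_idx], arr[i]
            swaps.extend([i, min_idx])
    return len(swaps) // 2, swaps

# e.g. selection_sort_with_swaps([3, 1, 2]) -> (2, [0, 1, 1, 2])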
mucciz/News | tests/news_test.py | 2484d91edaef181d9a6d4b86d6bee822781f931d | import unittest
from app.models import News
# News = news.News
class NewsTest(unittest.TestCase):
'''
    Test Class to test the behaviour of the News class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_news = News('abc-news','ABC NEWS','Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.','http://www.abc.net.au/news','business','au')
def test_instance(self):
self.assertTrue(isinstance(self.new_news,News))
def test_init(self):
self.assertEqual(self.new_news.id,'abc-news')
self.assertEqual(self.new_news.name,'ABC NEWS')
self.assertEqual(self.new_news.description,'Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.')
self.assertEqual(self.new_news.url,'http://www.abc.net.au/news')
self.assertEqual(self.new_news.country,'au')
if __name__ == '__main__':
    unittest.main()
| [((14, 24, 14, 207), 'app.models.News', 'News', ({(14, 29, 14, 39): '"""abc-news"""', (14, 40, 14, 50): '"""ABC NEWS"""', (14, 51, 14, 161): '"""Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com."""', (14, 162, 14, 190): '"""http://www.abc.net.au/news"""', (14, 191, 14, 201): '"""business"""', (14, 202, 14, 206): '"""au"""'}, {}), "('abc-news', 'ABC NEWS',\n 'Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.'\n , 'http://www.abc.net.au/news', 'business', 'au')", False, 'from app.models import News\n')] |
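The test row above pins down the constructor and attributes of app.models.News; a minimal stand-in that would satisfy those assertions (purely an assumption — the real model in that repo may differ):

# Hypothetical stand-in for app.models.News, inferred from the test's constructor call and asserts.
class News:
    def __init__(self, id, name, description, url, category, country):
        self.id = id
        self.name = name
        self.description = description
        self.url = url
        self.category = category
        self.country = country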
MQasimSarfraz/cilium | test/get-gh-comment-info.py | 89b622cf4e0a960e27e5b1bf9f139abee25dfea0 | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases
parser.add_argument('--focus', type=str, default="")
parser.add_argument('--version', type=str, default="")
parser.add_argument('--retrieve', type=str, default="focus")
args = parser.parse_args()
print(args.__dict__[args.retrieve])
| [] |
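A hedged, illustrative check of the argument handling above — the GitHub comment text and focus value are invented:

# Continuing from the parser defined above; values are made up for illustration.
args = parser.parse_args(['test-me-please', '--focus', 'K8sServices', '--retrieve', 'focus'])
print(args.__dict__[args.retrieve])   # -> K8sServices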
madcat1991/clustered_cars | preprocessing/booking.py | a79b83d9d14360c6c51d4bf462217ef690e62c74 | """
This script cleans and prepares the bookings data set for later use
"""
import argparse
import logging
import sys
import pandas as pd
from preprocessing.common import canonize_datetime, raw_data_to_df, check_processed_columns, check_data
OLD_BREAKPOINT_MATCHER = {
2001: [
(1, 1, "New Year"), (1, 6, "Winter"),
(2, 17, "Half Terms"), (2, 24, "Spring and Autumn"),
(4, 7, "Easter"), (4, 21, "Spring and Autumn"),
(5, 26, "SBH"),
(6, 2, "Early Summer"),
(7, 21, "Summer holidays"),
(9, 1, "Early Autumn"), (9, 15, "Spring and Autumn"),
(10, 27, "Half Terms"),
(11, 3, "Winter"),
(12, 22, "Christmas"), (12, 29, "New Year"),
],
2002: [
(1, 1, "New Year"), (1, 5, "Winter"),
(2, 16, "Half Terms"), (2, 23, "Spring and Autumn"),
(4, 6, "Easter"), (4, 20, "Spring and Autumn"),
(5, 25, "SBH"),
(6, 1, "Early Summer"),
(7, 20, "Summer holidays"),
(8, 31, "Early Autumn"),
(9, 14, "Spring and Autumn"),
(10, 26, "Half Terms"),
(11, 2, "Winter"),
(12, 21, "Christmas"), (12, 28, "New Year"),
],
2003: [
(1, 1, "New Year"), (1, 4, "Winter"),
(2, 15, "Half Terms"), (2, 22, "Spring and Autumn"),
(4, 5, "Easter"), (4, 19, "Spring and Autumn"),
(5, 24, "SBH"), (5, 31, "Early Summer"),
(7, 19, "Summer holidays"),
(8, 30, "Early Autumn"),
(9, 13, "Spring and Autumn"),
(10, 25, "Half Terms"),
(11, 1, "Winter"),
(12, 20, "Christmas"), (12, 27, "New Year"),
],
2004: [
(1, 1, "New Year"), (1, 3, "Winter"),
(2, 14, "Half Terms"), (2, 21, "Spring and Autumn"),
(4, 3, "Easter"), (4, 17, "Spring and Autumn"),
(5, 22, "SBH"), (5, 29, "Early Summer"),
(7, 17, "Summer holidays"),
(8, 28, "Early Autumn"),
(9, 11, "Spring and Autumn"),
(10, 23, "Half Terms"), (10, 30, "Winter"),
(12, 18, "Christmas"),
],
2005: [
(1, 1, "Winter"),
(2, 12, "Half Terms"), (2, 19, "Spring and Autumn"),
(4, 2, "Easter"), (4, 16, "Spring and Autumn"),
(5, 21, "SBH"), (5, 28, "Early Summer"),
(7, 16, "Summer holidays"),
(8, 27, "Early Autumn"),
(9, 10, "Spring and Autumn"),
(10, 22, "Half Terms"), (10, 29, "Winter"),
(12, 17, "Christmas"), (12, 31, "New Year"),
],
2006: [
(1, 1, "New Year"), (1, 7, "Winter"),
(2, 18, "Half Terms"), (2, 25, "Spring and Autumn"),
(4, 8, "Easter"), (4, 22, "Spring and Autumn"),
(5, 27, "SBH"),
(6, 3, "Early Summer"),
(7, 22, "Summer holidays"),
(9, 2, "Early Autumn"), (9, 16, "Spring and Autumn"),
(10, 28, "Half Terms"),
(11, 4, "Winter"),
(12, 23, "Christmas"), (12, 30, "New Year"),
],
2007: [
(1, 1, "New Year"), (1, 6, "Winter"),
(2, 17, "Half Terms"), (2, 24, "Spring and Autumn"),
(4, 7, "Easter"),
(4, 21, "Spring and Autumn"),
(5, 26, "SBH"),
(6, 2, "Early Summer"),
(7, 21, "Summer holidays"),
(9, 1, "Early Autumn"), (9, 15, "Spring and Autumn"),
(10, 27, "Half Terms"),
(11, 3, "Winter"),
(12, 22, "Christmas"), (12, 29, "New Year"),
],
2008: [
(1, 1, "New Year"), (1, 5, "Winter"),
(2, 16, "Half Terms"), (2, 23, "Spring and Autumn"),
(3, 22, "Easter"),
(4, 19, "Spring and Autumn"),
(5, 24, "SBH"), (5, 31, "Early Summer"),
(7, 19, "Summer holidays"),
(8, 30, "Early Autumn"),
(9, 13, "Spring and Autumn"),
(10, 25, "Half Terms"),
(11, 1, "Winter"),
(12, 20, "Christmas"),
],
}
COLS_TO_DROP = [
'pname', 'region', 'sleeps', 'stars', 'proppostcode', # can be taken from property
'bookdate_scoreboard', 'book_year', 'hh_gross', 'hh_net', 'ho', # HH specific
'holidayprice', # correlates with avg_spend_per_head
'bighouse', 'burghisland', 'boveycastle', # no need
'sourcecostid', # is a pair of u'sourcedesc', u'category'
'drivedistance', # correlates with drivetime
]
NOT_NA_COLS = [u'bookcode', u'code', u'propcode', u'year', u'breakpoint', u'avg_spend_per_head']
DATE_COLS = [u'bookdate', u'sdate', u"fdate"]
FLOAT_COLS = [u'avg_spend_per_head', u'drivetime']
INT_COLS = [u'adults', u'babies', u'children', u'pets']
CATEGORICAL_COLS = [u'sourcedesc', u'category']
def get_breakpoint(dt):
breakpoint = None
matcher = OLD_BREAKPOINT_MATCHER.get(dt.year, [])
for _m, _d, _b in matcher:
if _m > dt.month or (_m == dt.month and _d > dt.day):
break
breakpoint = _b
return breakpoint
def fine_tune_df(df):
logging.info(u"DF shape before fine tuning: %s", df.shape)
averages = {col: df[col].dropna().mean() for col in FLOAT_COLS}
zeros = {col: 0 for col in INT_COLS}
most_popular_values = {col: df[col].value_counts().index[0] for col in CATEGORICAL_COLS}
logging.info(u"Filling NA with average: %s", averages)
df = df.fillna(averages)
logging.info(u"Filling NA with zeros: %s", zeros)
df = df.fillna(zeros)
logging.info(u"Filling NA with most populars: %s", most_popular_values)
df = df.fillna(most_popular_values)
df[INT_COLS] = df[INT_COLS].astype(int)
logging.info(u"Before cleaning NA: %s", df.shape)
df = df.dropna(subset=NOT_NA_COLS)
logging.info(u"After cleaning NA: %s", df.shape)
if pd.isnull(df.values).any():
logging.error(u"NA values left in df")
return df
def fill_missed_breakpoints(df):
df = df[pd.notnull(df.breakpoint) | pd.notnull(df.zone_name)]
logging.info(u"Bookings having breakpoint or zone_name: %s", df.shape[0])
logging.info(u"Filling missing breakpoints: %s", df[pd.isnull(df.breakpoint)].shape[0])
    # assign via .loc to avoid pandas chained assignment, which may not write back to df
    df.loc[pd.isnull(df.breakpoint), 'breakpoint'] = df.sdate[pd.isnull(df.breakpoint)].apply(get_breakpoint)
logging.info(u"Left NA breakpoints: %s", df[pd.isnull(df.breakpoint)].shape[0])
return df.drop(u'zone_name', axis=1)
def main():
check_data(args.input_csv, args.input_csv_delimiter)
df = raw_data_to_df(args.input_csv, args.input_csv_delimiter)
original_columns = df.columns
logging.info(u"DF initial shape: %s", df.shape)
df = df.drop(COLS_TO_DROP, axis=1)
df = canonize_datetime(df, DATE_COLS)
df = fill_missed_breakpoints(df)
df = fine_tune_df(df)
processed_columns = set(df.columns).union(COLS_TO_DROP + [u'zone_name'])
check_processed_columns(processed_columns, original_columns)
logging.info(u"Dumping data to: %s", args.output_csv)
df.to_csv(args.output_csv, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', required=True, dest="input_csv",
help=u'Path to a csv file with bookings')
parser.add_argument('--id', default=";", dest="input_csv_delimiter",
help=u"The input file's delimiter. Default: ';'")
parser.add_argument('-o', default="bookings.csv", dest="output_csv",
help=u'Path to an output file. Default: booking.csv')
parser.add_argument("--log-level", default='INFO', dest="log_level",
choices=['DEBUG', 'INFO', 'WARNINGS', 'ERROR'], help=u"Logging level")
args = parser.parse_args()
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(message)s', stream=sys.stdout, level=getattr(logging, args.log_level)
)
main()
| [((140, 4, 140, 62), 'logging.info', 'logging.info', ({(140, 17, 140, 51): 'u"""DF shape before fine tuning: %s"""', (140, 53, 140, 61): 'df.shape'}, {}), "(u'DF shape before fine tuning: %s', df.shape)", False, 'import logging\n'), ((146, 4, 146, 58), 'logging.info', 'logging.info', ({(146, 17, 146, 47): 'u"""Filling NA with average: %s"""', (146, 49, 146, 57): 'averages'}, {}), "(u'Filling NA with average: %s', averages)", False, 'import logging\n'), ((148, 4, 148, 53), 'logging.info', 'logging.info', ({(148, 17, 148, 45): 'u"""Filling NA with zeros: %s"""', (148, 47, 148, 52): 'zeros'}, {}), "(u'Filling NA with zeros: %s', zeros)", False, 'import logging\n'), ((150, 4, 150, 75), 'logging.info', 'logging.info', ({(150, 17, 150, 53): 'u"""Filling NA with most populars: %s"""', (150, 55, 150, 74): 'most_popular_values'}, {}), "(u'Filling NA with most populars: %s', most_popular_values)", False, 'import logging\n'), ((155, 4, 155, 53), 'logging.info', 'logging.info', ({(155, 17, 155, 42): 'u"""Before cleaning NA: %s"""', (155, 44, 155, 52): 'df.shape'}, {}), "(u'Before cleaning NA: %s', df.shape)", False, 'import logging\n'), ((157, 4, 157, 52), 'logging.info', 'logging.info', ({(157, 17, 157, 41): 'u"""After cleaning NA: %s"""', (157, 43, 157, 51): 'df.shape'}, {}), "(u'After cleaning NA: %s', df.shape)", False, 'import logging\n'), ((166, 4, 166, 77), 'logging.info', 'logging.info', ({(166, 17, 166, 63): 'u"""Bookings having breakpoint or zone_name: %s"""', (166, 65, 166, 76): 'df.shape[0]'}, {}), "(u'Bookings having breakpoint or zone_name: %s', df.shape[0])", False, 'import logging\n'), ((174, 4, 174, 56), 'preprocessing.common.check_data', 'check_data', ({(174, 15, 174, 29): 'args.input_csv', (174, 31, 174, 55): 'args.input_csv_delimiter'}, {}), '(args.input_csv, args.input_csv_delimiter)', False, 'from preprocessing.common import canonize_datetime, raw_data_to_df, check_processed_columns, check_data\n'), ((175, 9, 175, 65), 'preprocessing.common.raw_data_to_df', 'raw_data_to_df', ({(175, 24, 175, 38): 'args.input_csv', (175, 40, 175, 64): 'args.input_csv_delimiter'}, {}), '(args.input_csv, args.input_csv_delimiter)', False, 'from preprocessing.common import canonize_datetime, raw_data_to_df, check_processed_columns, check_data\n'), ((177, 4, 177, 51), 'logging.info', 'logging.info', ({(177, 17, 177, 40): 'u"""DF initial shape: %s"""', (177, 42, 177, 50): 'df.shape'}, {}), "(u'DF initial shape: %s', df.shape)", False, 'import logging\n'), ((181, 9, 181, 41), 'preprocessing.common.canonize_datetime', 'canonize_datetime', ({(181, 27, 181, 29): 'df', (181, 31, 181, 40): 'DATE_COLS'}, {}), '(df, DATE_COLS)', False, 'from preprocessing.common import canonize_datetime, raw_data_to_df, check_processed_columns, check_data\n'), ((187, 4, 187, 64), 'preprocessing.common.check_processed_columns', 'check_processed_columns', ({(187, 28, 187, 45): 'processed_columns', (187, 47, 187, 63): 'original_columns'}, {}), '(processed_columns, original_columns)', False, 'from preprocessing.common import canonize_datetime, raw_data_to_df, check_processed_columns, check_data\n'), ((189, 4, 189, 57), 'logging.info', 'logging.info', ({(189, 17, 189, 39): 'u"""Dumping data to: %s"""', (189, 41, 189, 56): 'args.output_csv'}, {}), "(u'Dumping data to: %s', args.output_csv)", False, 'import logging\n'), ((194, 13, 194, 104), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((160, 8, 160, 46), 'logging.error', 'logging.error', ({(160, 22, 160, 45): 'u"""NA values left in 
df"""'}, {}), "(u'NA values left in df')", False, 'import logging\n'), ((159, 7, 159, 27), 'pandas.isnull', 'pd.isnull', ({(159, 17, 159, 26): 'df.values'}, {}), '(df.values)', True, 'import pandas as pd\n'), ((168, 18, 168, 42), 'pandas.isnull', 'pd.isnull', ({(168, 28, 168, 41): 'df.breakpoint'}, {}), '(df.breakpoint)', True, 'import pandas as pd\n'), ((165, 12, 165, 37), 'pandas.notnull', 'pd.notnull', ({(165, 23, 165, 36): 'df.breakpoint'}, {}), '(df.breakpoint)', True, 'import pandas as pd\n'), ((165, 40, 165, 64), 'pandas.notnull', 'pd.notnull', ({(165, 51, 165, 63): 'df.zone_name'}, {}), '(df.zone_name)', True, 'import pandas as pd\n'), ((167, 56, 167, 80), 'pandas.isnull', 'pd.isnull', ({(167, 66, 167, 79): 'df.breakpoint'}, {}), '(df.breakpoint)', True, 'import pandas as pd\n'), ((168, 49, 168, 73), 'pandas.isnull', 'pd.isnull', ({(168, 59, 168, 72): 'df.breakpoint'}, {}), '(df.breakpoint)', True, 'import pandas as pd\n'), ((169, 48, 169, 72), 'pandas.isnull', 'pd.isnull', ({(169, 58, 169, 71): 'df.breakpoint'}, {}), '(df.breakpoint)', True, 'import pandas as pd\n')] |
PKU-GeekGame/gs-backend | src/api/wish.py | d13219609d4e52810540bda6a3bddac1bf5406ce | from sanic import Blueprint, Request, HTTPResponse, response
from sanic.models.handler_types import RouteHandler
from functools import wraps
from inspect import isawaitable
from typing import Callable, Dict, Any, Union, Awaitable, List, Optional
ACCEPTED_WISH_VERS = ['wish.alpha.v1']
WishHandler = Callable[..., Union[Dict[str, Any], Awaitable[Dict[str, Any]]]]
def wish_endpoint(bp: Blueprint, uri: str, *, methods: Optional[List[str]] = None) -> Callable[[WishHandler], RouteHandler]:
if methods is None:
methods = ['POST']
def decorator(fn: WishHandler) -> RouteHandler:
@wraps(fn)
async def wrapped(req: Request, *args: Any, **kwargs: Any) -> HTTPResponse:
v = req.headers.get('X-Wish-Version', '(none)')
if v not in ACCEPTED_WISH_VERS:
                return response.json({
                    'error': 'WISH_VERSION_MISMATCH',
                    'error_msg': f'Frontend version {v} is not up to date',
                })
retval_ = fn(req, *args, **kwargs)
retval = (await retval_) if isawaitable(retval_) else retval_
return response.json({
'error': None, # may be overridden by retval
**retval,
})
return bp.route(uri, methods)(wrapped) # type: ignore
return decorator | [((16, 9, 16, 18), 'functools.wraps', 'wraps', ({(16, 15, 16, 17): 'fn'}, {}), '(fn)', False, 'from functools import wraps\n'), ((28, 19, 31, 14), 'sanic.response.json', 'response.json', ({(28, 33, 31, 13): "{'error': None, **retval}"}, {}), "({'error': None, **retval})", False, 'from sanic import Blueprint, Request, HTTPResponse, response\n'), ((20, 23, 23, 18), 'sanic.response.json', 'response.json', ({(20, 37, 23, 17): "{'error': 'WISH_VERSION_MISMATCH', 'error_msg': f'前端版本 {v} 不是最新'}"}, {}), "({'error': 'WISH_VERSION_MISMATCH', 'error_msg': f'前端版本 {v} 不是最新'}\n )", False, 'from sanic import Blueprint, Request, HTTPResponse, response\n'), ((26, 40, 26, 60), 'inspect.isawaitable', 'isawaitable', ({(26, 52, 26, 59): 'retval_'}, {}), '(retval_)', False, 'from inspect import isawaitable\n')] |
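A minimal sketch of how wish_endpoint above might be applied — the blueprint name, route, and payload are invented for illustration:

from typing import Any, Dict
from sanic import Blueprint, Request

bp = Blueprint('wish_demo')

@wish_endpoint(bp, '/ping')
async def ping(req: Request) -> Dict[str, Any]:
    # the returned dict is wrapped by the decorator into {'error': None, ...}
    return {'pong': True}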