import os
import shutil
from setuptools import find_packages
from setuptools import setup
if not os.path.exists('build'):
os.mkdir('build')
shutil.copyfile('imageroller/main.py', 'build/imageroller')
setup(
name='imageroller',
version='0.10.1',
description='Rackspace Server Image Creator',
long_description="""
A simple backup utility utilizing saved images
for Rackspace virtual machines.
Allows for the configuration of multiple servers
with varying retention specifications.
Can easily be scheduled via crond, etc. to maintain a number
of images and their retention for your Rackspace-hosted servers.
""",
author='Lee Clemens Computing Services, LLC',
author_email='[email protected]',
url='https://lc-cs.com/',
license='MIT',
packages=find_packages(
exclude=['tests', ]
),
entry_points={
'console_scripts': [
'imageroller = imageroller.main:main_func',
],
},
setup_requires=[],
install_requires=[
'requests',
'urllib3', ],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Archiving :: Backup',
'Topic :: Utilities',
]
)
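# Note (added for clarity, not part of the original setup script): installing
# this package (e.g. with `pip install .`) registers an `imageroller` console
# script that dispatches to imageroller.main:main_func, per the entry_points
# declaration above.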
| {
"content_hash": "f6a50359bb1bd1eb991018f3c9cc17af",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 63,
"avg_line_length": 28.9811320754717,
"alnum_prop": 0.6243489583333334,
"repo_name": "leeclemens/imageroller",
"id": "715d2e413fb79600e6767a2717a31829e12369ba",
"size": "2699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103065"
}
],
"symlink_target": ""
} |
"""
This module provides JVMs to run programs.
.. moduleauthor:: Michael Markert <[email protected]>
:copyright: PenchY Developers 2011-2012, see AUTHORS
:license: MIT License, see LICENSE
"""
import itertools
import logging
import os
import shlex
import subprocess
from hashlib import sha1
from tempfile import NamedTemporaryFile
from penchy.compat import update_hasher, nested, path
from penchy.jobs.elements import PipelineElement
from penchy.jobs.hooks import Hook
from penchy.jobs.typecheck import Types
log = logging.getLogger(__name__)
class JVMNotConfiguredError(Exception):
"""
Signals that a JVM is not sufficiently configured, i.e. a workload is
missing or no classpath is set.
"""
pass
class JVMExecutionError(Exception):
"""
Signals that an execution of the JVM failed, i.e. it returned a non-zero exit code.
"""
pass
class JVM(object):
"""
This class represents a JVM.
:attr:`hooks` contains :class:`~penchy.jobs.hooks.BaseHook` instances whose ``setup``
and ``teardown`` methods will be executed before and after the JVM
is run, respectively.
"""
def __init__(self, path, options="", timeout_factor=1, name=None):
"""
:param path: path to jvm executable relative to node's basepath
(can also be absolute)
:type path: path
:param options: string of options that will be passed to the JVM; needs to be
properly escaped for a shell
:type options: str
:param name: decorative name; if None, the JVM path with user options is used
:type name: str
"""
self.basepath = '/'
self.timeout_factor = timeout_factor
self._path = path
# keep user_options for log messages and comparisons around
self._user_options = options
self.name = name
self._options = shlex.split(options)
self._classpath = _extract_classpath(self._options)
self.hooks = []
# for tools and workloads
self._tool = None
self._workload = None
# jvm process
self.proc = None
@property
def workload(self):
"""
The current workload.
"""
return self._workload
@workload.setter
def workload(self, workload):
"""
Setter for the workload.
.. note::
Warns if an existing workload gets overwritten.
"""
if self._workload is not None: # pragma: no cover
log.warn("Overwriting workload!")
self._workload = workload
@property
def tool(self):
"""
The current tool.
"""
return self._tool
@tool.setter
def tool(self, tool):
"""
Setter for the tool.
.. note::
Warns if an existing tool gets overwritten.
"""
if self._tool is not None: # pragma: no cover
log.warn("Overwriting Tool!")
self._tool = tool
@property
def timeout(self):
"""
Timeout of this JVM and the current workload.
"""
if not self.workload:
return 0
return float(self.workload.timeout) * self.timeout_factor
def add_to_cp(self, path):
"""
Adds a path to the classpath.
:param path: classpath to add
:type path: string
"""
self._classpath.extend(path.split(os.pathsep))
def run(self):
"""
Run the JVM in the current configuration.
:raises: :exc:`JVMNotConfiguredError` if no workload or classpath is set
"""
if not self._classpath:
log.error('No classpath configured')
raise JVMNotConfiguredError('no classpath configured')
if not self.workload:
log.error('No workload configured')
raise JVMNotConfiguredError('no workload configured')
log.debug("executing setup hooks")
hooks = self._get_hooks()
for hook in hooks:
hook.setup()
log.debug("executing {0}".format(self.cmdline))
with nested(NamedTemporaryFile(delete=False, dir='.'),
NamedTemporaryFile(delete=False, dir='.')) \
as (stderr, stdout):
self.proc = subprocess.Popen(self.cmdline,
stdout=stdout, stderr=stderr)
# measure usertime before
before = os.times()[0]
log.debug('CPU time before invocation: {0}'.format(before))
self.proc.communicate()
# measure usertime after
after = os.times()[0]
diff = after - before
log.debug('CPU time after invocation: {0}, difference: '
'{1}'.format(after, diff))
if diff > 0.1:
log.error('High cpu difference: {0} seconds'.format(diff))
self.workload.out['exit_code'].append(self.proc.returncode)
self.workload.out['stdout'].append(stdout.name)
self.workload.out['stderr'].append(stderr.name)
if self.proc.returncode != 0:
log.error('jvm execution failed, stderr:')
stderr.seek(0)
log.error(stderr.read())
log.error('jvm execution failed, stdout:')
stdout.seek(0)
log.error(stdout.read())
raise JVMExecutionError('non zero exit code: {0}'
.format(self.proc.returncode))
log.debug("executing teardown hooks")
for hook in hooks:
hook.teardown()
@property
def cmdline(self):
"""
The command line suitable for `subprocess.Popen` based on the current
configuration.
"""
executable = os.path.join(self.basepath, self._path)
cp = ['-classpath', os.pathsep.join(self._classpath)] if self._classpath \
else []
if self.tool:
options = self._options + self.tool.arguments
else:
options = self._options
args = self.workload.arguments if self.workload else []
return [executable] + options + cp + args
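# Illustrative sketch (not part of the original module): how ``cmdline`` is
# composed from the pieces above, assuming a JVM configured on a POSIX node
# like this:
#
#     jvm = JVM('jdk/bin/java', '-server')
#     jvm.basepath = '/opt'
#     jvm.add_to_cp('app.jar')
#     jvm.cmdline
#     # ['/opt/jdk/bin/java', '-server', '-classpath', 'app.jar']
#
# With a tool set, its arguments are appended to the user options; with a
# workload set, its arguments are appended after the classpath.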
def _get_hooks(self):
"""
Return hooks of JVM together with possible workload and tool hooks.
:returns: hooks of configuration
:rtype: list of :class:`~penchy.jobs.hooks.BaseHook`
"""
if self.workload is None:
workload_hooks = []
else:
workload_hooks = self.workload.hooks
if self.tool is None:
tool_hooks = []
else:
tool_hooks = self.tool.hooks
return list(itertools.chain(self.hooks, tool_hooks, workload_hooks))
def __eq__(self, other):
try:
return all((
# executing the equal jvm
self._path == other._path,
# with equal options
self._user_options == other._user_options,
# check if both workloads or none is set
(self.workload is None and other.workload is None
or self.workload and other.workload),
# check if both tools or none is set
(self.tool is None and other.tool is None
or self.tool and other.tool)))
except AttributeError:
log.exception('Comparing JVM to non-JVM: ')
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(hash(self._path) + hash(self._user_options))
def __repr__(self):
return "{2}({0}, '{1}')".format(self._path, self._user_options,
self.__class__.__name__)
def __str__(self):
return self.name or repr(self)
def hash(self):
"""
Return the sha1 hexdigest.
Used for identifying :class:`SystemComposition` across server and
client.
:returns: sha1 hexdigest of instance
:rtype: str
"""
hasher = sha1()
update_hasher(hasher, self._path)
update_hasher(hasher, ' '.join(self.workload.arguments)
if self.workload else '')
update_hasher(hasher, self._user_options)
return hasher.hexdigest()
def information(self):
"""
Collect and return information about the JVM.
:returns: information about the JVM, the execution and the workload
:rtype: dict
"""
executable = os.path.join(self.basepath, self._path)
cp = ['-classpath', os.pathsep.join(self._classpath)] if self._classpath else []
call = [executable] + cp
p = subprocess.Popen(call + ['-version'], stderr=subprocess.PIPE)
_, jvm = p.communicate()
if self._workload is None:
workload = ''
elif not hasattr(self._workload, 'information_arguments'):
workload = str(self._workload)
else:
p = subprocess.Popen(call + self._workload.information_arguments,
stderr=subprocess.PIPE)
_, workload = p.communicate()
workload = workload.decode('utf-8')
tool = str(self._tool) if self._tool else ''
return {
'jvm' : jvm.decode('utf-8'),
'cmdline' : ' '.join(self.cmdline),
'workload' : workload,
'tool' : tool
}
class WrappedJVM(JVM, PipelineElement): # pragma: no cover
"""
This class is an abstract base class for a JVM that is wrapped by another
program.
Inheriting classes must expose these attributes:
- ``out`` a dictionary that maps logical names for output to the actual values.
- ``outputs`` a :class:`~penchy.jobs.typecheck.Types` that describes the
output with a logical name and its types
- ``cmdline`` that returns the cmdline suitable for :class:`subprocess.Popen`
"""
def _run(self):
raise ValueError('This is not your normal element, but a JVM')
class ValgrindJVM(WrappedJVM):
"""
This class represents a JVM which is called by valgrind.
Outputs:
- ``valgrind_log``: paths to valgrind log file.
"""
outputs = Types(('valgrind_log', list, path))
arguments = []
def __init__(self, path, options='',
valgrind_path='valgrind', valgrind_options=''):
"""
:param path: path to jvm executable relative to node's basepath
(can also be absolute)
:type path: str
:param options: options for JVM (needs to be escaped for a shell)
:type options: str
:param valgrind_path: path to valgrind executable
:type valgrind_path: str
:param valgrind_options: options for valgrind (needs to be escaped for
shell)
:type valgrind_options: str
"""
super(ValgrindJVM, self).__init__(path, options)
PipelineElement.__init__(self)
self.valgrind_path = valgrind_path
self.valgrind_options = valgrind_options
self.log_name = 'penchy-valgrind.log'
self.hooks.append(Hook(teardown=lambda: self.out['valgrind_log']
.append(os.path.abspath(self.log_name))))
if hasattr(self, '_hooks'):
self.hooks.extend(self._hooks)
@property
def cmdline(self):
"""
The command line suitable for `subprocess.Popen` based on the current
configuration.
"""
cmd = [self.valgrind_path,
'--log-file={0}'.format(self.log_name),
'--smc-check=all', # to support reflection, really slow
'--trace-children=yes']
if self.__class__.arguments:
cmd.extend(self.__class__.arguments)
cmd.extend(shlex.split(self.valgrind_options))
return cmd + super(ValgrindJVM, self).cmdline
def information(self):
"""
Collect and return information about the JVM and Valgrind.
:returns: information about the JVM, the execution and the workload
:rtype: dict
"""
d = super(ValgrindJVM, self).information()
p = subprocess.Popen([self.valgrind_path, '--version'], stdout=subprocess.PIPE)
valgrind, _ = p.communicate()
d['valgrind'] = valgrind.decode('utf-8')
return d
class MemcheckJVM(ValgrindJVM):
"""
This is a valgrind JVM that checks memory usage.
Outputs:
- ``valgrind_log``: paths to Memcheck log file.
"""
arguments = ['--tool=memcheck']
class CacheGrindJVM(ValgrindJVM):
"""
This is a valgrind JVM that checks cache usage.
Outputs:
- ``valgrind_log``: paths to Valgrind log file.
- ``cachegrind``: paths to Cachegrind log file.
"""
outputs = Types(('valgrind_log', list, path),
('cachegrind', list, path))
_cachegrind_file = 'penchy-cachegrind'
arguments = ['--tool=cachegrind',
'--cachegrind-out-file={0}'.format(_cachegrind_file)]
def __init__(self, *args, **kwargs):
super(CacheGrindJVM, self).__init__(*args, **kwargs)
self._hooks = [Hook(teardown=lambda: self.out['cachegrind']
.append(os.path.abspath(CacheGrindJVM._cachegrind_file)))]
class CallGrindJVM(ValgrindJVM):
"""
This is a valgrind JVM that generates a call graph.
Outputs:
- ``valgrind_log``: paths to Valgrind log file.
- ``callgrind``: paths to Callgrind log file.
"""
outputs = Types(('valgrind_log', list, path),
('callgrind', list, path))
_callgrind_file = 'penchy-callgrind'
arguments = ['--tool=callgrind',
'--callgrind-out-file={0}'.format(_callgrind_file)]
def __init__(self, *args, **kwargs):
super(CallGrindJVM, self).__init__(*args, **kwargs)
self._hooks = [Hook(teardown=lambda: self.out['callgrind']
.append(os.path.abspath(CallGrindJVM._callgrind_file)))]
class MassifJVM(ValgrindJVM):
"""
This is a valgrind JVM that runs the heap profiler Massif.
Outputs:
- ``valgrind_log``: paths to Valgrind log file.
"""
arguments = ['--tool=massif']
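# Illustrative sketch (not part of the original module): a MemcheckJVM wraps
# the plain JVM command line with valgrind. Assuming default settings and a
# bare ``MemcheckJVM('jdk/bin/java')``, its ``cmdline`` would look roughly like:
#
#     ['valgrind', '--log-file=penchy-valgrind.log', '--smc-check=all',
#      '--trace-children=yes', '--tool=memcheck', '/jdk/bin/java']
#
# (classpath, workload and tool arguments are appended by the inherited
# JVM.cmdline once they are configured).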
def _extract_classpath(options):
"""
Return the jvm classpath from a sequence of option strings.
:param options: sequence of jvm options to search
:type options: list
:returns: classpath as list of parts
:rtype: list
"""
prev = ''
# a later classpath overwrites previous definitions so we have to search
# from the end
for option in reversed(options):
if option in ('-cp', '-classpath'):
# check prev for emptiness to prevent returning [''] if classpath is
# only option
return prev.split(os.pathsep) if prev else []
prev = option
return []
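# Illustrative sketch (not part of the original module): what
# _extract_classpath returns for a typical option list (POSIX path separator
# assumed):
#
#     _extract_classpath(['-Xmx2g', '-classpath', 'a.jar:b.jar'])
#     # ['a.jar', 'b.jar']
#     _extract_classpath(['-classpath'])   # dangling flag, no value follows
#     # []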
| {
"content_hash": "a991a03781502b9f40525af53b87f6fb",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 89,
"avg_line_length": 31.047916666666666,
"alnum_prop": 0.5759914111252767,
"repo_name": "fhirschmann/penchy",
"id": "d766b991d4529bf65c4b4bc602f4e26521a8d84f",
"size": "14903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "penchy/jobs/jvms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "295609"
},
{
"name": "Shell",
"bytes": "211"
}
],
"symlink_target": ""
} |
import multiprocessing
import time,os
def func(msg):
for i in xrange(3):
print msg + ' ' + str(os.getpid())
time.sleep(1)
return "done " + msg
if __name__ == "__main__":
pool = multiprocessing.Pool(processes=4)
result = []
for i in xrange(10):
msg = "hello %d" %(i)
result.append(pool.apply_async(func, (msg, )))
pool.close()
pool.join()
for res in result:
print res.get()
print "Sub-process(es) done."
| {
"content_hash": "42dba2eb57a4b6ff2dc42857d31d6aa2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 54,
"avg_line_length": 24.15,
"alnum_prop": 0.5527950310559007,
"repo_name": "pelucky/python-test",
"id": "3f7038dbbc1b0d17ec937010ef6e65e48a56d62d",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rebootArch4/homework2/multiporcess_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24313"
},
{
"name": "VimL",
"bytes": "4749"
}
],
"symlink_target": ""
} |
import threading
from concurrent import futures
from taskflow.engines.action_engine import graph_action
from taskflow.engines.action_engine import task_action
from taskflow.engines import base
from taskflow import exceptions as exc
from taskflow.openstack.common import excutils
from taskflow.openstack.common import uuidutils
from taskflow import states
from taskflow import storage as t_storage
from taskflow.utils import flow_utils
from taskflow.utils import lock_utils
from taskflow.utils import misc
from taskflow.utils import reflection
from taskflow.utils import threading_utils
class ActionEngine(base.EngineBase):
"""Generic action-based engine.
This engine flattens the flow (and any subflows) into an execution graph
which contains the full runtime definition to be executed and then uses
this graph in combination with the action classes & storage to attempt to
run your flow (and any subflows & contained tasks) to completion.
During this process it is permissible and valid to have a task or multiple
tasks in the execution graph fail, which will cause the process of
reversion to commence. See the valid states in the states module to learn
more about what other states the tasks & flow being run can go through.
"""
_graph_action = None
def __init__(self, flow, flow_detail, backend, conf):
super(ActionEngine, self).__init__(flow, flow_detail, backend, conf)
self._failures = {} # task uuid => failure
self._root = None
self._lock = threading.RLock()
self._state_lock = threading.RLock()
self.notifier = misc.TransitionNotifier()
self.task_notifier = misc.TransitionNotifier()
def _revert(self, current_failure=None):
self._change_state(states.REVERTING)
try:
state = self._root.revert(self)
except Exception:
with excutils.save_and_reraise_exception():
self._change_state(states.FAILURE)
self._change_state(state)
if state == states.SUSPENDED:
return
misc.Failure.reraise_if_any(self._failures.values())
if current_failure:
current_failure.reraise()
def __str__(self):
return "%s: %s" % (reflection.get_class_name(self), id(self))
def suspend(self):
"""Attempts to suspend the engine.
If the engine is currently running tasks then this will attempt to
suspend future work from being started (currently active tasks can
not be preempted) and move the engine into a suspended state
which can then later be resumed from.
"""
self._change_state(states.SUSPENDING)
@property
def execution_graph(self):
self.compile()
return self._root.graph
@lock_utils.locked
def run(self):
"""Runs the flow in the engine to completion."""
self.compile()
external_provides = set(self.storage.fetch_all().keys())
missing = self._flow.requires - external_provides
if missing:
raise exc.MissingDependencies(self._flow, sorted(missing))
if self._failures:
self._revert()
else:
self._run()
def _run(self):
self._change_state(states.RUNNING)
try:
state = self._root.execute(self)
except Exception:
self._change_state(states.FAILURE)
self._revert(misc.Failure())
else:
self._change_state(state)
@lock_utils.locked(lock='_state_lock')
def _change_state(self, state):
old_state = self.storage.get_flow_state()
if not states.check_flow_transition(old_state, state):
return
self.storage.set_flow_state(state)
try:
flow_uuid = self._flow.uuid
except AttributeError:
# NOTE(harlowja): if the flow was just a single task, then it will
# not itself have a uuid, but the constructed flow_detail will.
if self._flow_detail is not None:
flow_uuid = self._flow_detail.uuid
else:
flow_uuid = None
details = dict(engine=self,
flow_name=self._flow.name,
flow_uuid=flow_uuid,
old_state=old_state)
self.notifier.notify(state, details)
def _on_task_state_change(self, task_action, state, result=None):
"""Notifies the engine that the following task action has completed
a given state with a given result. This is a *internal* to the action
engine and its associated action classes, not for use externally.
"""
if isinstance(result, misc.Failure):
self._failures[task_action.uuid] = result
details = dict(engine=self,
task_name=task_action.name,
task_uuid=task_action.uuid,
result=result)
self.task_notifier.notify(state, details)
@lock_utils.locked
def compile(self):
"""Compiles the contained flow into a structure which the engine can
use to run or if this can not be done then an exception is thrown
indicating why this compilation could not be achieved.
"""
if self._root is not None:
return
assert self._graph_action is not None, ('Graph action class must be'
' specified')
self._change_state(states.RESUMING) # does nothing in PENDING state
task_graph = flow_utils.flatten(self._flow)
self._root = self._graph_action(task_graph)
loaded_failures = {}
for task in task_graph.nodes_iter():
try:
task_id = self.storage.get_uuid_by_name(task.name)
except exc.NotFound:
task_id = uuidutils.generate_uuid()
task_version = misc.get_version_string(task)
self.storage.add_task(task_name=task.name, uuid=task_id,
task_version=task_version)
try:
result = self.storage.get(task_id)
except exc.NotFound:
result = None
if isinstance(result, misc.Failure):
# NOTE(imelnikov): old failure may have exc_info which
# might get lost during serialization, so we preserve
# old failure object if possible.
old_failure = self._failures.get(task_id, None)
if result.matches(old_failure):
loaded_failures[task_id] = old_failure
else:
loaded_failures[task_id] = result
self.storage.set_result_mapping(task_id, task.save_as)
self._root.add(task, task_action.TaskAction(task, task_id))
self._failures = loaded_failures
self._change_state(states.SUSPENDED) # does nothing in PENDING state
@property
def is_running(self):
return self.storage.get_flow_state() == states.RUNNING
@property
def is_reverting(self):
return self.storage.get_flow_state() == states.REVERTING
class SingleThreadedActionEngine(ActionEngine):
# NOTE(harlowja): This one attempts to run in a serial manner.
_graph_action = graph_action.SequentialGraphAction
_storage_cls = t_storage.Storage
class MultiThreadedActionEngine(ActionEngine):
# NOTE(harlowja): This one attempts to run in a parallel manner.
_graph_action = graph_action.ParallelGraphAction
_storage_cls = t_storage.ThreadSafeStorage
def __init__(self, flow, flow_detail, backend, conf):
super(MultiThreadedActionEngine, self).__init__(
flow, flow_detail, backend, conf)
self._executor = conf.get('executor', None)
@lock_utils.locked
def run(self):
if self._executor is None:
# NOTE(harlowja): since no executor was provided we have to create
# one, and also ensure that we shutdown the one we create to
# ensure that we don't leak threads.
thread_count = threading_utils.get_optimal_thread_count()
self._executor = futures.ThreadPoolExecutor(thread_count)
owns_executor = True
else:
owns_executor = False
try:
ActionEngine.run(self)
finally:
# Don't forget to shutdown the executor!!
if owns_executor:
try:
self._executor.shutdown(wait=True)
finally:
self._executor = None
@property
def executor(self):
"""Returns the current executor, if no executor is provided on
construction then this executor will change each time the engine
is ran.
"""
return self._executor
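# Illustrative sketch (not part of the original module): callers can hand the
# engine a shared executor through the conf dict, in which case run() will not
# shut it down afterwards:
#
#     executor = futures.ThreadPoolExecutor(4)
#     engine = MultiThreadedActionEngine(flow, flow_detail, backend,
#                                        {'executor': executor})
#     engine.run()
#     # executor is still usable here; shutting it down is the caller's job.
#
# (flow, flow_detail and backend are placeholders for objects created via the
# usual taskflow entry points.)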
| {
"content_hash": "f3e2137fced92f1adbd8f92f9ae1f5b1",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 78,
"avg_line_length": 37.914529914529915,
"alnum_prop": 0.6157574391343553,
"repo_name": "ntt-sic/taskflow",
"id": "f104dac28576c2165f23e72ea2eb33e1678137f8",
"size": "9574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskflow/engines/action_engine/engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "469752"
},
{
"name": "Shell",
"bytes": "3255"
}
],
"symlink_target": ""
} |
"""Inception - reduced version of keras/applications/inception_v3.py ."""
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
"""Inception model parameters.
Args:
parser_nn: global command line args parser
Returns: parser with updated arguments
"""
parser_nn.add_argument(
'--cnn1_filters',
type=str,
default='24',
help='Number of filters in the first conv blocks',
)
parser_nn.add_argument(
'--cnn1_kernel_sizes',
type=str,
default='5',
help='Kernel size in time dim of conv blocks',
)
parser_nn.add_argument(
'--cnn1_strides',
type=str,
default='1',
help='Strides applied in pooling layer in the first conv block',
)
parser_nn.add_argument(
'--cnn2_filters1',
type=str,
default='10,10,16',
help='Number of filters inside of inception block '
'will be multiplied by 3 because of concatenation of 3 branches',
)
parser_nn.add_argument(
'--cnn2_filters2',
type=str,
default='10,10,16',
help='Number of filters inside of inception block '
'it is used to reduce the dim of cnn2_filters1*3',
)
parser_nn.add_argument(
'--cnn2_kernel_sizes',
type=str,
default='5,5,5',
help='Kernel sizes of conv layers in the inception block',
)
parser_nn.add_argument(
'--cnn2_strides',
type=str,
default='2,2,1',
help='Stride parameter of pooling layer in the inception block',
)
parser_nn.add_argument(
'--dropout',
type=float,
default=0.2,
help='Percentage of data dropped',
)
parser_nn.add_argument(
'--bn_scale',
type=int,
default=0,
help='If True, multiply by gamma. If False, gamma is not used. '
'When the next layer is linear (also e.g. nn.relu), this can be disabled '
'since the scaling will be done by the next layer.',
)
def model(flags):
"""Inception model.
It is based on paper:
Rethinking the Inception Architecture for Computer Vision
http://arxiv.org/abs/1512.00567
Args:
flags: data/model parameters
Returns:
Keras model for training
"""
input_audio = tf.keras.layers.Input(
shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
batch_size=flags.batch_size)
net = input_audio
if flags.preprocess == 'raw':
# it is a self contained model, user need to feed raw audio only
net = speech_features.SpeechFeatures(
speech_features.SpeechFeatures.get_params(flags))(
net)
# [batch, time, feature]
net = tf.keras.backend.expand_dims(net, axis=2)
# [batch, time, 1, feature]
for stride, filters, kernel_size in zip(
utils.parse(flags.cnn1_strides),
utils.parse(flags.cnn1_filters),
utils.parse(flags.cnn1_kernel_sizes)):
net = utils.conv2d_bn(
net, filters, (kernel_size, 1), padding='valid', scale=flags.bn_scale)
if stride > 1:
net = tf.keras.layers.MaxPooling2D((3, 1), strides=(stride, 1))(net)
for stride, filters1, filters2, kernel_size in zip(
utils.parse(flags.cnn2_strides), utils.parse(flags.cnn2_filters1),
utils.parse(flags.cnn2_filters2), utils.parse(flags.cnn2_kernel_sizes)):
branch1 = utils.conv2d_bn(net, filters1, (1, 1), scale=flags.bn_scale)
branch2 = utils.conv2d_bn(net, filters1, (1, 1), scale=flags.bn_scale)
branch2 = utils.conv2d_bn(
branch2, filters1, (kernel_size, 1), scale=flags.bn_scale)
branch3 = utils.conv2d_bn(net, filters1, (1, 1), scale=flags.bn_scale)
branch3 = utils.conv2d_bn(
branch3, filters1, (kernel_size, 1), scale=flags.bn_scale)
branch3 = utils.conv2d_bn(
branch3, filters1, (kernel_size, 1), scale=flags.bn_scale)
net = tf.keras.layers.concatenate([branch1, branch2, branch3])
# [batch, time, 1, filters1*3]
net = utils.conv2d_bn(net, filters2, (1, 1), scale=flags.bn_scale)
# [batch, time, 1, filters2]
if stride > 1:
net = tf.keras.layers.MaxPooling2D((3, 1), strides=(stride, 1))(net)
net = tf.keras.layers.GlobalAveragePooling2D()(net)
# [batch, filters2]
net = tf.keras.layers.Dropout(flags.dropout)(net)
net = tf.keras.layers.Dense(flags.label_count)(net)
if flags.return_softmax:
net = tf.keras.layers.Activation('softmax')(net)
return tf.keras.Model(input_audio, net)
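# Illustrative sketch (not part of the original module): the cnn2_* flags are
# parallel comma-separated lists, one entry per inception block. Assuming
# utils.parse() simply turns such a string into a list of ints, the defaults
#
#     --cnn2_strides '2,2,1' --cnn2_filters1 '10,10,16'
#     --cnn2_filters2 '10,10,16' --cnn2_kernel_sizes '5,5,5'
#
# build three inception blocks; the first two are followed by a (3, 1) max
# pooling with stride 2 in time, the last one is not (stride 1 skips pooling).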
| {
"content_hash": "30fa40be6e5068cc27e91cf5699d5e83",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 31.99290780141844,
"alnum_prop": 0.651961870982044,
"repo_name": "google-research/google-research",
"id": "690c15338446150dd17384a1bd1b6af78000c75b",
"size": "5119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kws_streaming/models/inception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""Commands that can be used to operate on explorations.
All functions here should be agnostic of how ExplorationModel objects are
stored in the database. In particular, the various query methods should
delegate to the Exploration model class. This will enable the exploration
storage model to be changed without affecting this module and others above it.
"""
import copy
import datetime
import logging
import os
import pprint
import StringIO
import zipfile
from core.domain import exp_domain
from core.domain import fs_domain
from core.domain import rights_manager
from core.domain import user_services
from core.platform import models
import feconf
import utils
memcache_services = models.Registry.import_memcache_services()
search_services = models.Registry.import_search_services()
taskqueue_services = models.Registry.import_taskqueue_services()
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
# This takes additional 'title' and 'category' parameters.
CMD_CREATE_NEW = 'create_new'
# Name for the exploration search index.
SEARCH_INDEX_EXPLORATIONS = 'explorations'
# The maximum number of iterations allowed for populating the results of a
# search query.
MAX_ITERATIONS = 10
# Constants used to initialize EntityChangeListSummarizer
_BASE_ENTITY_STATE = 'state'
_BASE_ENTITY_GADGET = 'gadget'
# Constants used for gallery ranking.
_STATUS_PUBLICIZED_BONUS = 30
# This is done to prevent the rank hitting 0 too easily. Note that
# negative ranks are disallowed in the Search API.
_DEFAULT_RANK = 20
_MS_IN_ONE_DAY = 24 * 60 * 60 * 1000
def _migrate_states_schema(versioned_exploration_states):
"""Holds the responsibility of performing a step-by-step, sequential update
of an exploration states structure based on the schema version of the input
exploration dictionary. This is very similar to the YAML conversion process
found in exp_domain.py and, in fact, many of the conversion functions for
states are also used in the YAML conversion pipeline. If the current
exploration states schema version changes
(feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION), a new conversion
function must be added and some code appended to this function to account
for that new version.
Args:
versioned_exploration_states: A dict with two keys:
- states_schema_version: the states schema version for the
exploration.
- states: the dict of states comprising the exploration. The keys in
this dict are state names.
"""
states_schema_version = versioned_exploration_states[
'states_schema_version']
if states_schema_version is None or states_schema_version < 1:
states_schema_version = 0
if not (0 <= states_schema_version
<= feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d and unversioned exploration '
'state schemas at present.' %
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
while (states_schema_version <
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
exp_domain.Exploration.update_states_from_model(
versioned_exploration_states, states_schema_version)
states_schema_version += 1
# Repository GET methods.
def _get_exploration_memcache_key(exploration_id, version=None):
"""Returns a memcache key for an exploration."""
if version:
return 'exploration-version:%s:%s' % (exploration_id, version)
else:
return 'exploration:%s' % exploration_id
def get_exploration_from_model(exploration_model, run_conversion=True):
"""Returns an Exploration domain object given an exploration model loaded
from the datastore.
If run_conversion is True, then the exploration's states schema version
will be checked against the current states schema version. If they do not
match, the exploration will be automatically updated to the latest states
schema version.
IMPORTANT NOTE TO DEVELOPERS: In general, run_conversion should never be
False. This option is only used for testing that the states schema version
migration works correctly, and it should never be changed otherwise.
"""
# Ensure the original exploration model does not get altered.
versioned_exploration_states = {
'states_schema_version': exploration_model.states_schema_version,
'states': copy.deepcopy(exploration_model.states)
}
# If the exploration uses the latest states schema version, no conversion
# is necessary.
if (run_conversion and exploration_model.states_schema_version !=
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
_migrate_states_schema(versioned_exploration_states)
return exp_domain.Exploration(
exploration_model.id, exploration_model.title,
exploration_model.category, exploration_model.objective,
exploration_model.language_code, exploration_model.tags,
exploration_model.blurb, exploration_model.author_notes,
exploration_model.skin_customizations,
versioned_exploration_states['states_schema_version'],
exploration_model.init_state_name,
versioned_exploration_states['states'],
exploration_model.param_specs, exploration_model.param_changes,
exploration_model.version, created_on=exploration_model.created_on,
last_updated=exploration_model.last_updated)
def get_exploration_summary_from_model(exp_summary_model):
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.status,
exp_summary_model.community_owned, exp_summary_model.owner_ids,
exp_summary_model.editor_ids, exp_summary_model.viewer_ids,
exp_summary_model.contributor_ids, exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated
)
def get_exploration_by_id(exploration_id, strict=True, version=None):
"""Returns a domain object representing an exploration."""
exploration_memcache_key = _get_exploration_memcache_key(
exploration_id, version=version)
memcached_exploration = memcache_services.get_multi(
[exploration_memcache_key]).get(exploration_memcache_key)
if memcached_exploration is not None:
return memcached_exploration
else:
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=strict, version=version)
if exploration_model:
exploration = get_exploration_from_model(exploration_model)
memcache_services.set_multi({
exploration_memcache_key: exploration})
return exploration
else:
return None
def get_exploration_summary_by_id(exploration_id):
"""Returns a domain object representing an exploration summary."""
# TODO(msl): Maybe use memcache similarly to get_exploration_by_id.
exp_summary_model = exp_models.ExpSummaryModel.get(
exploration_id)
if exp_summary_model:
exp_summary = get_exploration_summary_from_model(exp_summary_model)
return exp_summary
else:
return None
def get_multiple_explorations_by_id(exp_ids, strict=True):
"""Returns a dict of domain objects representing explorations with the
given ids as keys. If an exp_id is not present it is not included in the
return dict.
"""
exp_ids = set(exp_ids)
result = {}
uncached = []
memcache_keys = [_get_exploration_memcache_key(i) for i in exp_ids]
cache_result = memcache_services.get_multi(memcache_keys)
for exp_obj in cache_result.itervalues():
result[exp_obj.id] = exp_obj
for _id in exp_ids:
if _id not in result:
uncached.append(_id)
db_exp_models = exp_models.ExplorationModel.get_multi(uncached)
db_results_dict = {}
not_found = []
for i, eid in enumerate(uncached):
model = db_exp_models[i]
if model:
exploration = get_exploration_from_model(model)
db_results_dict[eid] = exploration
else:
logging.info('Tried to fetch exploration with id %s, but no such '
'exploration exists in the datastore' % eid)
not_found.append(eid)
if strict and not_found:
raise ValueError(
'Couldn\'t find explorations with the following ids:\n%s'
% '\n'.join(not_found))
cache_update = {
eid: db_results_dict[eid] for eid in db_results_dict.iterkeys()
if db_results_dict[eid] is not None
}
if cache_update:
memcache_services.set_multi(cache_update)
result.update(db_results_dict)
return result
def get_new_exploration_id():
"""Returns a new exploration id."""
return exp_models.ExplorationModel.get_new_id('')
def is_exp_summary_editable(exp_summary, user_id=None):
"""Checks if a given user may edit an exploration by checking
the given domain object.
"""
return user_id is not None and (
user_id in exp_summary.editor_ids
or user_id in exp_summary.owner_ids
or exp_summary.community_owned)
# Query methods.
def get_exploration_titles_and_categories(exp_ids):
"""Returns exploration titles and categories for the given ids.
The result is a dict with exploration ids as keys. The corresponding values
are dicts with the keys 'title' and 'category'.
Any invalid exp_ids will not be included in the return dict. No error will
be raised.
"""
explorations = [
(get_exploration_from_model(e) if e else None)
for e in exp_models.ExplorationModel.get_multi(exp_ids)]
result = {}
for exploration in explorations:
if exploration is None:
logging.error(
'Could not find exploration corresponding to id')
else:
result[exploration.id] = {
'title': exploration.title,
'category': exploration.category,
}
return result
def _get_exploration_summary_dicts_from_models(exp_summary_models):
"""Given an iterable of ExpSummaryModel instances, create a dict containing
corresponding exploration summary domain objects, keyed by id.
"""
exploration_summaries = [
get_exploration_summary_from_model(exp_summary_model)
for exp_summary_model in exp_summary_models]
result = {}
for exp_summary in exploration_summaries:
result[exp_summary.id] = exp_summary
return result
def get_exploration_summaries_matching_ids(exp_ids):
"""Given a list of exploration ids, return a list with the corresponding
summary domain objects (or None if the corresponding summary does not
exist).
"""
return [
(get_exploration_summary_from_model(model) if model else None)
for model in exp_models.ExpSummaryModel.get_multi(exp_ids)]
def get_exploration_ids_matching_query(query_string, cursor=None):
"""Returns a list with all exploration ids matching the given search query
string, as well as a search cursor for future fetches.
This method returns exactly feconf.GALLERY_PAGE_SIZE results if there are
at least that many, otherwise it returns all remaining results. (If this
behaviour does not occur, an error will be logged.) The method also returns
a search cursor.
"""
returned_exploration_ids = []
search_cursor = cursor
for _ in range(MAX_ITERATIONS):
remaining_to_fetch = feconf.GALLERY_PAGE_SIZE - len(
returned_exploration_ids)
exp_ids, search_cursor = search_explorations(
query_string, remaining_to_fetch, cursor=search_cursor)
invalid_exp_ids = []
for ind, model in enumerate(
exp_models.ExpSummaryModel.get_multi(exp_ids)):
if model is not None:
returned_exploration_ids.append(exp_ids[ind])
else:
invalid_exp_ids.append(exp_ids[ind])
if len(returned_exploration_ids) == feconf.GALLERY_PAGE_SIZE or (
search_cursor is None):
break
else:
logging.error(
'Search index contains stale exploration ids: %s' %
', '.join(invalid_exp_ids))
if (len(returned_exploration_ids) < feconf.GALLERY_PAGE_SIZE
and search_cursor is not None):
logging.error(
'Could not fulfill search request for query string %s; at least '
'%s retries were needed.' % (query_string, MAX_ITERATIONS))
return (returned_exploration_ids, search_cursor)
def get_non_private_exploration_summaries():
"""Returns a dict with all non-private exploration summary domain objects,
keyed by their id.
"""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_non_private())
def get_all_exploration_summaries():
"""Returns a dict with all exploration summary domain objects,
keyed by their id.
"""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_all())
# Methods for exporting states and explorations to other formats.
def export_to_zip_file(exploration_id, version=None):
"""Returns a ZIP archive of the exploration."""
exploration = get_exploration_by_id(exploration_id, version=version)
yaml_repr = exploration.to_yaml()
memfile = StringIO.StringIO()
with zipfile.ZipFile(
memfile, mode='w', compression=zipfile.ZIP_DEFLATED) as zfile:
zfile.writestr('%s.yaml' % exploration.title, yaml_repr)
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
for filepath in dir_list:
# Currently, the version number of all files is 1, since they are
# not modifiable post-upload.
# TODO(sll): When allowing editing of files, implement versioning
# for them.
file_contents = fs.get(filepath, version=1)
str_filepath = 'assets/%s' % filepath
assert isinstance(str_filepath, str)
unicode_filepath = str_filepath.decode('utf-8')
zfile.writestr(unicode_filepath, file_contents)
return memfile.getvalue()
def export_states_to_yaml(exploration_id, version=None, width=80):
"""Returns a python dictionary of the exploration, whose keys are state
names and values are yaml strings representing the state contents with
lines wrapped at 'width' characters.
"""
exploration = get_exploration_by_id(exploration_id, version=version)
exploration_dict = {}
for state in exploration.states:
exploration_dict[state] = utils.yaml_from_dict(
exploration.states[state].to_dict(), width=width)
return exploration_dict
# Repository SAVE and DELETE methods.
def apply_change_list(exploration_id, change_list):
"""Applies a changelist to a pristine exploration and returns the result.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
the resulting exploration domain object.
"""
exploration = get_exploration_by_id(exploration_id)
try:
changes = [exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
for change in changes:
if change.cmd == exp_domain.CMD_ADD_STATE:
exploration.add_states([change.state_name])
elif change.cmd == exp_domain.CMD_RENAME_STATE:
exploration.rename_state(
change.old_state_name, change.new_state_name)
elif change.cmd == exp_domain.CMD_DELETE_STATE:
exploration.delete_state(change.state_name)
elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY:
state = exploration.states[change.state_name]
if (change.property_name ==
exp_domain.STATE_PROPERTY_PARAM_CHANGES):
state.update_param_changes(change.new_value)
elif change.property_name == exp_domain.STATE_PROPERTY_CONTENT:
state.update_content(change.new_value)
elif (
change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_ID):
state.update_interaction_id(change.new_value)
elif (
change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS):
state.update_interaction_customization_args(
change.new_value)
elif (
change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_HANDLERS):
raise utils.InvalidInputException(
'Editing interaction handlers is no longer supported')
elif (
change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS):
state.update_interaction_answer_groups(change.new_value)
elif (
change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME):
state.update_interaction_default_outcome(change.new_value)
elif (
change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_UNCLASSIFIED_ANSWERS):
state.update_interaction_confirmed_unclassified_answers(
change.new_value)
elif (
change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_FALLBACKS):
state.update_interaction_fallbacks(change.new_value)
elif change.cmd == exp_domain.CMD_ADD_GADGET:
exploration.add_gadget(change.gadget_dict, change.panel)
elif change.cmd == exp_domain.CMD_RENAME_GADGET:
exploration.rename_gadget(
change.old_gadget_name, change.new_gadget_name)
elif change.cmd == exp_domain.CMD_DELETE_GADGET:
exploration.delete_gadget(change.gadget_name)
elif change.cmd == exp_domain.CMD_EDIT_GADGET_PROPERTY:
gadget_instance = exploration.get_gadget_instance_by_name(
change.gadget_name)
if (change.property_name ==
exp_domain.GADGET_PROPERTY_VISIBILITY):
gadget_instance.update_visible_in_states(change.new_value)
elif (
change.property_name ==
exp_domain.GADGET_PROPERTY_CUST_ARGS):
gadget_instance.update_customization_args(
change.new_value)
elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY:
if change.property_name == 'title':
exploration.update_title(change.new_value)
elif change.property_name == 'category':
exploration.update_category(change.new_value)
elif change.property_name == 'objective':
exploration.update_objective(change.new_value)
elif change.property_name == 'language_code':
exploration.update_language_code(change.new_value)
elif change.property_name == 'tags':
exploration.update_tags(change.new_value)
elif change.property_name == 'blurb':
exploration.update_blurb(change.new_value)
elif change.property_name == 'author_notes':
exploration.update_author_notes(change.new_value)
elif change.property_name == 'param_specs':
exploration.update_param_specs(change.new_value)
elif change.property_name == 'param_changes':
exploration.update_param_changes(change.new_value)
elif change.property_name == 'init_state_name':
exploration.update_init_state_name(change.new_value)
elif (
change.cmd ==
exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION):
# Loading the exploration model from the datastore into an
# Exploration domain object automatically converts it to use
# the latest states schema version. As a result, simply
# resaving the exploration is sufficient to apply the states
# schema update.
continue
return exploration
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, exploration_id,
pprint.pformat(change_list))
)
raise
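# Illustrative sketch (not part of the original module): a change_list is a
# list of dicts, each convertible into an exp_domain.ExplorationChange. For
# example (assuming the usual dict keys implied by the attribute accesses
# above), the following would add a state and then retitle the exploration:
#
#     change_list = [
#         {'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'Conclusion'},
#         {'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
#          'property_name': 'title', 'new_value': 'A better title'},
#     ]
#     exploration = apply_change_list(exploration_id, change_list)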
class EntityChangeListSummarizer(object):
"""Summarizes additions, deletions, and general changes against a given
entity type.
"""
def __init__(self, entity_type):
"""
Args:
- entity_type: string. Type of the base entity (e.g. 'state')
"""
self.entity_type = entity_type
# A list of added entity names.
self.added_entities = []
# A list of deleted entity names.
self.deleted_entities = []
# A list of entity names. This indicates that the entity has changed
# but we do not know what the changes are. This can happen for
# complicated operations like removing an entity and later adding a
# new entity with the same name as the removed one.
self.changed_entities = []
# A dict, where each key is an entity's name, and the corresponding
# values are dicts; the keys of these dicts represent properties of
# the entity, and the corresponding values are dicts with keys
# old_value and new_value. If an entity's 'name' property is changed,
# this is listed as a property name change under the old entity name
# in the outer dict.
self.property_changes = {}
@property
def add_entity_cmd(self):
return 'add_%s' % self.entity_type
@property
def rename_entity_cmd(self):
return 'rename_%s' % self.entity_type
@property
def delete_entity_cmd(self):
return 'delete_%s' % self.entity_type
@property
def edit_entity_property_cmd(self):
return 'edit_%s_property' % self.entity_type
@property
def entity_name(self):
return '%s_name' % self.entity_type
@property
def new_entity_name(self):
return 'new_%s_name' % self.entity_type
@property
def old_entity_name(self):
return 'old_%s_name' % self.entity_type
def process_changes(self, original_entity_names, changes):
"""Processes the changes, making results available in each of the
initialized data structures of the EntityChangeListSummarizer.
Args:
- original_entity_names: a list of strings representing the names of
the individual entities before any of the changes in the change list
have been made.
- changes: list of ExplorationChange instances.
"""
# TODO(sll): Make this method do the same thing as the corresponding
# code in the frontend's editor history tab -- or, better still, get
# rid of it entirely and use the frontend code to generate change
# summaries directly.
# original_names is a dict whose keys are the current state names, and
# whose values are the names of these states before any changes were
# applied. It is used to keep track of identities of states throughout
# the sequence of changes.
original_names = {name: name for name in original_entity_names}
for change in changes:
if change.cmd == self.add_entity_cmd:
entity_name = getattr(change, self.entity_name)
if entity_name in self.changed_entities:
continue
elif entity_name in self.deleted_entities:
self.changed_entities.append(entity_name)
# TODO(sll): This logic doesn't make sense for the
# following sequence of events: (a) an existing
# non-default entity is deleted, (b) a new default entity
# with the same name is created. Rewrite this method to
# take that case into account (or, better still, delete it
# altogether and use the frontend history diff
# functionality instead).
if entity_name in self.property_changes:
del self.property_changes[entity_name]
self.deleted_entities.remove(entity_name)
else:
self.added_entities.append(entity_name)
original_names[entity_name] = entity_name
elif change.cmd == self.rename_entity_cmd:
new_entity_name = getattr(change, self.new_entity_name)
old_entity_name = getattr(change, self.old_entity_name)
orig_name = original_names[old_entity_name]
original_names[new_entity_name] = orig_name
del original_names[old_entity_name]
if orig_name in self.changed_entities:
continue
if orig_name not in self.property_changes:
self.property_changes[orig_name] = {}
if 'name' not in self.property_changes[orig_name]:
self.property_changes[orig_name]['name'] = {
'old_value': old_entity_name
}
self.property_changes[orig_name]['name']['new_value'] = (
new_entity_name)
elif change.cmd == self.delete_entity_cmd:
entity_name = getattr(change, self.entity_name)
orig_name = original_names[entity_name]
del original_names[entity_name]
if orig_name in self.changed_entities:
continue
elif orig_name in self.added_entities:
self.added_entities.remove(orig_name)
else:
self.deleted_entities.append(orig_name)
elif change.cmd == self.edit_entity_property_cmd:
entity_name = getattr(change, self.entity_name)
orig_name = original_names[entity_name]
if orig_name in self.changed_entities:
continue
property_name = change.property_name
if orig_name not in self.property_changes:
self.property_changes[orig_name] = {}
if property_name not in self.property_changes[orig_name]:
self.property_changes[orig_name][property_name] = {
'old_value': change.old_value
}
self.property_changes[orig_name][property_name][
'new_value'] = change.new_value
unchanged_names = []
for name in self.property_changes:
unchanged_properties = []
changes = self.property_changes[name]
for property_name in changes:
if (changes[property_name]['old_value'] ==
changes[property_name]['new_value']):
unchanged_properties.append(property_name)
for property_name in unchanged_properties:
del changes[property_name]
if len(changes) == 0:
unchanged_names.append(name)
for name in unchanged_names:
del self.property_changes[name]
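# Illustrative sketch (not part of the original class): for the 'state' entity
# type, renaming a pre-existing state 'Intro' to 'Introduction' would leave
# added_entities and deleted_entities empty and record the rename as a
# property change keyed by the original name:
#
#     property_changes == {
#         'Intro': {'name': {'old_value': 'Intro',
#                            'new_value': 'Introduction'}}
#     }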
def get_summary_of_change_list(base_exploration, change_list):
"""Applies a changelist to a pristine exploration and returns a summary.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
a dict with nine keys:
- exploration_property_changes: a dict, where each key is a
property_name of the exploration, and the corresponding values are
dicts with keys old_value and new_value.
- the 4 'state' and 4 'gadget' change summaries, per the data structures
defined in EntityChangeListSummarizer
"""
# TODO(anuzis): need to capture changes in gadget positioning
# between and within panels when we expand support for these actions.
# Ensure that the original exploration does not get altered.
exploration = copy.deepcopy(base_exploration)
changes = [
exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
# State changes
state_change_summarizer = EntityChangeListSummarizer(
_BASE_ENTITY_STATE)
state_change_summarizer.process_changes(
exploration.states.keys(), changes)
# Gadget changes
gadget_change_summarizer = EntityChangeListSummarizer(
_BASE_ENTITY_GADGET)
gadget_change_summarizer.process_changes(
exploration.get_all_gadget_names(), changes)
# Exploration changes
exploration_property_changes = {}
for change in changes:
if change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY:
property_name = change.property_name
if property_name not in exploration_property_changes:
exploration_property_changes[property_name] = {
'old_value': change.old_value
}
exploration_property_changes[property_name]['new_value'] = (
change.new_value)
elif (
change.cmd ==
exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION):
continue
unchanged_exp_properties = []
for property_name in exploration_property_changes:
if (exploration_property_changes[property_name]['old_value'] ==
exploration_property_changes[property_name]['new_value']):
unchanged_exp_properties.append(property_name)
for property_name in unchanged_exp_properties:
del exploration_property_changes[property_name]
return {
'exploration_property_changes': exploration_property_changes,
'state_property_changes': state_change_summarizer.property_changes,
'changed_states': state_change_summarizer.changed_entities,
'added_states': state_change_summarizer.added_entities,
'deleted_states': state_change_summarizer.deleted_entities,
'gadget_property_changes': gadget_change_summarizer.property_changes,
'changed_gadgets': gadget_change_summarizer.changed_entities,
'added_gadgets': gadget_change_summarizer.added_entities,
'deleted_gadgets': gadget_change_summarizer.deleted_entities
}
def _save_exploration(committer_id, exploration, commit_message, change_list):
"""Validates an exploration and commits it to persistent storage.
If successful, increments the version number of the incoming exploration
domain object by 1.
"""
if change_list is None:
change_list = []
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exploration_model = exp_models.ExplorationModel.get(
exploration.id, strict=False)
if exploration_model is None:
exploration_model = exp_models.ExplorationModel(id=exploration.id)
else:
if exploration.version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
elif exploration.version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
exploration_model.category = exploration.category
exploration_model.title = exploration.title
exploration_model.objective = exploration.objective
exploration_model.language_code = exploration.language_code
exploration_model.tags = exploration.tags
exploration_model.blurb = exploration.blurb
exploration_model.author_notes = exploration.author_notes
exploration_model.skin_customizations = (
exploration.skin_instance.to_dict()['skin_customizations'])
exploration_model.states_schema_version = exploration.states_schema_version
exploration_model.init_state_name = exploration.init_state_name
exploration_model.states = {
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()}
exploration_model.param_specs = exploration.param_specs_dict
exploration_model.param_changes = exploration.param_change_dicts
exploration_model.commit(committer_id, commit_message, change_list)
memcache_services.delete(_get_exploration_memcache_key(exploration.id))
index_explorations_given_ids([exploration.id])
exploration.version += 1
def _create_exploration(
committer_id, exploration, commit_message, commit_cmds):
"""Ensures that rights for a new exploration are saved first.
This is because _save_exploration() depends on the rights object being
present to tell it whether to do strict validation or not.
"""
# This line is needed because otherwise a rights object will be created,
# but the creation of an exploration object will fail.
exploration.validate()
rights_manager.create_new_exploration_rights(exploration.id, committer_id)
model = exp_models.ExplorationModel(
id=exploration.id,
category=exploration.category,
title=exploration.title,
objective=exploration.objective,
language_code=exploration.language_code,
tags=exploration.tags,
blurb=exploration.blurb,
author_notes=exploration.author_notes,
skin_customizations=exploration.skin_instance.to_dict(
)['skin_customizations'],
states_schema_version=exploration.states_schema_version,
init_state_name=exploration.init_state_name,
states={
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()},
param_specs=exploration.param_specs_dict,
param_changes=exploration.param_change_dicts,
)
model.commit(committer_id, commit_message, commit_cmds)
exploration.version += 1
create_exploration_summary(exploration.id, committer_id)
def save_new_exploration(committer_id, exploration):
commit_message = (
'New exploration created with title \'%s\'.' % exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
user_services.add_created_exploration_id(committer_id, exploration.id)
user_services.add_edited_exploration_id(committer_id, exploration.id)
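# Illustrative usage sketch (not part of the original module): creating and
# persisting a brand-new exploration. The factory method name
# `create_default_exploration` and its (id, title, category) argument order are
# assumptions about the exp_domain API of this era; substitute whatever
# constructor the domain layer actually exposes. 'eid0' is a placeholder id.
def _example_save_new_exploration(committer_id):
    exploration = exp_domain.Exploration.create_default_exploration(
        'eid0', 'A title', 'A category')
    save_new_exploration(committer_id, exploration)
    return exploration.id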
def delete_exploration(committer_id, exploration_id, force_deletion=False):
"""Deletes the exploration with the given exploration_id.
IMPORTANT: Callers of this function should ensure that committer_id has
permissions to delete this exploration, prior to calling this function.
If force_deletion is True the exploration and its history are fully deleted
and are unrecoverable. Otherwise, the exploration and all its history are
marked as deleted, but the corresponding models are still retained in the
datastore. This last option is the preferred one.
"""
# TODO(sll): Delete the files too?
exploration_rights_model = exp_models.ExplorationRightsModel.get(
exploration_id)
exploration_rights_model.delete(
committer_id, '', force_deletion=force_deletion)
exploration_model = exp_models.ExplorationModel.get(exploration_id)
exploration_model.delete(
committer_id, feconf.COMMIT_MESSAGE_EXPLORATION_DELETED,
force_deletion=force_deletion)
# This must come after the exploration is retrieved. Otherwise the memcache
# key will be reinstated.
exploration_memcache_key = _get_exploration_memcache_key(exploration_id)
memcache_services.delete(exploration_memcache_key)
# Delete the exploration from search.
delete_documents_from_search_index([exploration_id])
# Delete the exploration summary, regardless of whether or not
# force_deletion is True.
delete_exploration_summary(exploration_id)
# Operations on exploration snapshots.
def get_exploration_snapshots_metadata(exploration_id):
"""Returns the snapshots for this exploration, as dicts.
Args:
exploration_id: str. The id of the exploration in question.
Returns:
list of dicts, each representing a recent snapshot. Each dict has the
following keys: committer_id, commit_message, commit_cmds, commit_type,
created_on_ms, version_number. The version numbers are consecutive and
in ascending order. There are exploration.version_number items in the
returned list.
"""
exploration = get_exploration_by_id(exploration_id)
current_version = exploration.version
version_nums = range(1, current_version + 1)
return exp_models.ExplorationModel.get_snapshots_metadata(
exploration_id, version_nums)
def _get_last_updated_by_human_ms(exp_id):
"""Return the last time, in milliseconds, when the given exploration was
updated by a human.
"""
# Iterate backwards through the exploration history metadata until we find
# the most recent snapshot that was committed by a human.
last_human_update_ms = 0
snapshots_metadata = get_exploration_snapshots_metadata(exp_id)
for snapshot_metadata in reversed(snapshots_metadata):
if snapshot_metadata['committer_id'] != feconf.MIGRATION_BOT_USER_ID:
last_human_update_ms = snapshot_metadata['created_on_ms']
break
return last_human_update_ms
def publish_exploration_and_update_user_profiles(committer_id, exp_id):
"""Publishes the exploration with publish_exploration() function in
rights_manager.py, as well as updates first_contribution_msec.
It is the responsibility of the caller to check that the exploration is
valid prior to publication.
"""
rights_manager.publish_exploration(committer_id, exp_id)
contribution_time_msec = utils.get_current_time_in_millisecs()
contributor_ids = get_exploration_summary_by_id(exp_id).contributor_ids
for contributor in contributor_ids:
user_services.update_first_contribution_msec_if_not_set(
contributor, contribution_time_msec)
def update_exploration(
committer_id, exploration_id, change_list, commit_message):
"""Update an exploration. Commits changes.
Args:
- committer_id: str. The id of the user who is performing the update
action.
- exploration_id: str. The exploration id.
- change_list: list of dicts, each representing a _Change object. These
changes are applied in sequence to produce the resulting exploration.
- commit_message: str or None. A description of changes made to the state.
For published explorations, this must be present; for unpublished
explorations, it should be equal to None.
"""
is_public = rights_manager.is_exploration_public(exploration_id)
if is_public and not commit_message:
raise ValueError(
'Exploration is public so expected a commit message but '
'received none.')
exploration = apply_change_list(exploration_id, change_list)
_save_exploration(committer_id, exploration, commit_message, change_list)
# Update summary of changed exploration.
update_exploration_summary(exploration.id, committer_id)
user_services.add_edited_exploration_id(committer_id, exploration.id)
if not rights_manager.is_exploration_private(exploration.id):
user_services.update_first_contribution_msec_if_not_set(
committer_id, utils.get_current_time_in_millisecs())
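# Illustrative sketch (not part of the original module) of the change_list
# format consumed by update_exploration(). The 'edit_exploration_property'
# command name is an assumption based on typical exploration change commands;
# consult exp_domain for the authoritative list of cmds and their payloads.
def _example_update_title(committer_id, exploration_id, new_title):
    change_list = [{
        'cmd': 'edit_exploration_property',
        'property_name': 'title',
        'new_value': new_title,
    }]
    update_exploration(
        committer_id, exploration_id, change_list,
        'Changed the exploration title.')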
def create_exploration_summary(exploration_id, contributor_id_to_add):
"""Create summary of an exploration and store in datastore."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = compute_summary_of_exploration(
exploration, contributor_id_to_add)
save_exploration_summary(exp_summary)
def update_exploration_summary(exploration_id, contributor_id_to_add):
"""Update the summary of an exploration."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = compute_summary_of_exploration(
exploration, contributor_id_to_add)
save_exploration_summary(exp_summary)
def compute_summary_of_exploration(exploration, contributor_id_to_add):
"""Create an ExplorationSummary domain object for a given Exploration
domain object and return it. contributor_id_to_add will be added to
the list of contributors for the exploration if the argument is not
None and if the id is not a system id.
"""
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exploration.id)
exp_summary_model = exp_models.ExpSummaryModel.get_by_id(exploration.id)
if exp_summary_model:
old_exp_summary = get_exploration_summary_from_model(exp_summary_model)
ratings = old_exp_summary.ratings or feconf.get_empty_ratings()
contributor_ids = old_exp_summary.contributor_ids or []
else:
ratings = feconf.get_empty_ratings()
contributor_ids = []
# Update the contributor id list if necessary (contributors
# defined as humans who have made a positive (i.e. not just
# a revert) change to an exploration's content).
if (contributor_id_to_add is not None and
contributor_id_to_add not in feconf.SYSTEM_USER_IDS):
if contributor_id_to_add not in contributor_ids:
contributor_ids.append(contributor_id_to_add)
exploration_model_last_updated = datetime.datetime.fromtimestamp(
_get_last_updated_by_human_ms(exploration.id) / 1000.0)
exploration_model_created_on = exploration.created_on
exp_summary = exp_domain.ExplorationSummary(
exploration.id, exploration.title, exploration.category,
exploration.objective, exploration.language_code,
exploration.tags, ratings, exp_rights.status,
exp_rights.community_owned, exp_rights.owner_ids,
exp_rights.editor_ids, exp_rights.viewer_ids, contributor_ids,
exploration.version, exploration_model_created_on,
exploration_model_last_updated)
return exp_summary
def save_exploration_summary(exp_summary):
"""Save an exploration summary domain object as an ExpSummaryModel entity
in the datastore.
"""
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_summary.id,
title=exp_summary.title,
category=exp_summary.category,
objective=exp_summary.objective,
language_code=exp_summary.language_code,
tags=exp_summary.tags,
ratings=exp_summary.ratings,
status=exp_summary.status,
community_owned=exp_summary.community_owned,
owner_ids=exp_summary.owner_ids,
editor_ids=exp_summary.editor_ids,
viewer_ids=exp_summary.viewer_ids,
contributor_ids=exp_summary.contributor_ids,
version=exp_summary.version,
exploration_model_last_updated=(
exp_summary.exploration_model_last_updated),
exploration_model_created_on=(
exp_summary.exploration_model_created_on)
)
exp_summary_model.put()
def delete_exploration_summary(exploration_id):
"""Delete an exploration summary model."""
exp_models.ExpSummaryModel.get(exploration_id).delete()
def revert_exploration(
committer_id, exploration_id, current_version, revert_to_version):
"""Reverts an exploration to the given version number. Commits changes."""
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=False)
if current_version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, current_version))
elif current_version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, current_version))
# Validate the previous version of the exploration before committing the
# change.
exploration = get_exploration_by_id(
exploration_id, version=revert_to_version)
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exp_models.ExplorationModel.revert(
exploration_model, committer_id,
'Reverted exploration to version %s' % revert_to_version,
revert_to_version)
memcache_services.delete(_get_exploration_memcache_key(exploration_id))
# Update the exploration summary, but since this is just a revert do
# not add the committer of the revert to the list of contributors.
update_exploration_summary(exploration_id, None)
# Creation and deletion methods.
def get_demo_exploration_components(demo_path):
"""Gets the content of `demo_path` in the sample explorations folder.
Args:
demo_path: the file or folder path for the content of an exploration
in SAMPLE_EXPLORATIONS_DIR. E.g.: 'adventure.yaml' or 'tar/'.
Returns:
a 2-tuple, the first element of which is a yaml string, and the second
element of which is a list of (filepath, content) 2-tuples. The filepath
does not include the assets/ prefix.
"""
demo_filepath = os.path.join(feconf.SAMPLE_EXPLORATIONS_DIR, demo_path)
if demo_filepath.endswith('yaml'):
file_contents = utils.get_file_contents(demo_filepath)
return file_contents, []
elif os.path.isdir(demo_filepath):
return utils.get_exploration_components_from_dir(demo_filepath)
else:
raise Exception('Unrecognized file path: %s' % demo_path)
def save_new_exploration_from_yaml_and_assets(
committer_id, yaml_content, title, category, exploration_id,
assets_list):
"""Note that the title and category will be ignored if the YAML
schema version is greater than
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
since in that case there will already be a title and category present in
the YAML schema.
"""
if assets_list is None:
assets_list = []
yaml_dict = utils.dict_from_yaml(yaml_content)
if 'schema_version' not in yaml_dict:
raise Exception('Invalid YAML file: missing schema version')
exp_schema_version = yaml_dict['schema_version']
if (exp_schema_version <=
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION):
# The schema of the YAML file for older explorations did not include
# a title and a category; these need to be manually specified.
exploration = exp_domain.Exploration.from_untitled_yaml(
exploration_id, title, category, yaml_content)
else:
exploration = exp_domain.Exploration.from_yaml(
exploration_id, yaml_content)
commit_message = (
'New exploration created from YAML file with title \'%s\'.'
% exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
for (asset_filename, asset_content) in assets_list:
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
fs.commit(committer_id, asset_filename, asset_content)
def delete_demo(exploration_id):
"""Deletes a single demo exploration."""
if not exp_domain.Exploration.is_demo_exploration_id(exploration_id):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exploration = get_exploration_by_id(exploration_id, strict=False)
if not exploration:
logging.info('Exploration with id %s was not deleted, because it '
'does not exist.' % exploration_id)
else:
delete_exploration(
feconf.SYSTEM_COMMITTER_ID, exploration_id, force_deletion=True)
def load_demo(exploration_id):
"""Loads a demo exploration.
The resulting exploration will have two commits in its history (one for its
    initial creation and one for its subsequent modification).
"""
delete_demo(exploration_id)
if not exp_domain.Exploration.is_demo_exploration_id(exploration_id):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exp_filename = feconf.DEMO_EXPLORATIONS[exploration_id]
yaml_content, assets_list = get_demo_exploration_components(exp_filename)
save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, None, None,
exploration_id, assets_list)
publish_exploration_and_update_user_profiles(
feconf.SYSTEM_COMMITTER_ID, exploration_id)
index_explorations_given_ids([exploration_id])
logging.info('Exploration with id %s was loaded.' % exploration_id)
def get_next_page_of_all_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None):
"""Returns a page of commits to all explorations in reverse time order.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_commits(
page_size, urlsafe_start_cursor))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def get_next_page_of_all_non_private_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None,
max_age=None):
"""Returns a page of non-private commits in reverse time order. If max_age
is given, it should be a datetime.timedelta instance.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
if max_age is not None and not isinstance(max_age, datetime.timedelta):
raise ValueError(
"max_age must be a datetime.timedelta instance. or None.")
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_non_private_commits(
page_size, urlsafe_start_cursor, max_age=max_age))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def _exp_rights_to_search_dict(rights):
# Allow searches like "is:featured".
doc = {}
if rights.status == rights_manager.ACTIVITY_STATUS_PUBLICIZED:
doc['is'] = 'featured'
return doc
def _should_index(exp):
rights = rights_manager.get_exploration_rights(exp.id)
return rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE
def _get_search_rank(exp_id):
"""Returns an integer determining the document's rank in search.
Featured explorations get a ranking bump, and so do explorations that
have been more recently updated. Good ratings will increase the ranking
and bad ones will lower it.
"""
# TODO(sll): Improve this calculation.
time_now_msec = utils.get_current_time_in_millisecs()
rating_weightings = {'1': -5, '2': -2, '3': 2, '4': 5, '5': 10}
rights = rights_manager.get_exploration_rights(exp_id)
summary = get_exploration_summary_by_id(exp_id)
rank = _DEFAULT_RANK + (
_STATUS_PUBLICIZED_BONUS
if rights.status == rights_manager.ACTIVITY_STATUS_PUBLICIZED
else 0)
if summary.ratings:
for rating_value in summary.ratings:
rank += (
summary.ratings[rating_value] *
rating_weightings[rating_value])
last_human_update_ms = _get_last_updated_by_human_ms(exp_id)
time_delta_days = int(
(time_now_msec - last_human_update_ms) / _MS_IN_ONE_DAY)
if time_delta_days == 0:
rank += 80
elif time_delta_days == 1:
rank += 50
elif 2 <= time_delta_days <= 7:
rank += 35
# Ranks must be non-negative.
return max(rank, 0)
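# Worked example (illustrative, not part of the original module): for a
# publicized exploration with ratings {'4': 1, '5': 2} that a human last
# edited earlier today, the rank computed above is
#     _DEFAULT_RANK + _STATUS_PUBLICIZED_BONUS   (base rank + publicized bump)
#     + 1 * 5 + 2 * 10                           (rating weightings for '4', '5')
#     + 80                                       (updated within the last day)
# i.e. the two base constants plus 105, clamped below at zero.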
def _exp_to_search_dict(exp):
rights = rights_manager.get_exploration_rights(exp.id)
doc = {
'id': exp.id,
'language_code': exp.language_code,
'title': exp.title,
'category': exp.category,
'tags': exp.tags,
'blurb': exp.blurb,
'objective': exp.objective,
'author_notes': exp.author_notes,
'rank': _get_search_rank(exp.id),
}
doc.update(_exp_rights_to_search_dict(rights))
return doc
def clear_search_index():
"""WARNING: This runs in-request, and may therefore fail if there are too
many entries in the index.
"""
search_services.clear_index(SEARCH_INDEX_EXPLORATIONS)
def index_explorations_given_ids(exp_ids):
# We pass 'strict=False' so as not to index deleted explorations.
exploration_models = get_multiple_explorations_by_id(exp_ids, strict=False)
search_services.add_documents_to_index([
_exp_to_search_dict(exp) for exp in exploration_models.values()
if _should_index(exp)
], SEARCH_INDEX_EXPLORATIONS)
def patch_exploration_search_document(exp_id, update):
"""Patches an exploration's current search document, with the values
from the 'update' dictionary.
"""
doc = search_services.get_document_from_index(
exp_id, SEARCH_INDEX_EXPLORATIONS)
doc.update(update)
search_services.add_documents_to_index([doc], SEARCH_INDEX_EXPLORATIONS)
def update_exploration_status_in_search(exp_id):
rights = rights_manager.get_exploration_rights(exp_id)
if rights.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
delete_documents_from_search_index([exp_id])
else:
patch_exploration_search_document(
rights.id, _exp_rights_to_search_dict(rights))
def delete_documents_from_search_index(exploration_ids):
search_services.delete_documents_from_index(
exploration_ids, SEARCH_INDEX_EXPLORATIONS)
def search_explorations(query, limit, sort=None, cursor=None):
"""Searches through the available explorations.
args:
      - query: the query string to search for.
- sort: a string indicating how to sort results. This should be a string
of space separated values. Each value should start with a '+' or a
'-' character indicating whether to sort in ascending or descending
order respectively. This character should be followed by a field name
to sort on. When this is None, results are based on 'rank'. See
_get_search_rank to see how rank is determined.
- limit: the maximum number of results to return.
- cursor: A cursor, used to get the next page of results.
If there are more documents that match the query than 'limit', this
function will return a cursor to get the next page.
returns: a tuple:
- a list of exploration ids that match the query.
- a cursor if there are more matching explorations to fetch, None
otherwise. If a cursor is returned, it will be a web-safe string that
can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_EXPLORATIONS, cursor, limit, sort, ids_only=True)
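# Illustrative pagination sketch (not part of the original module): draining
# search results page by page using the (ids, cursor) tuple returned above.
# The query string format depends on the search backend and is a placeholder.
def _example_collect_all_matching_ids(query):
    exp_ids = []
    cursor = None
    while True:
        page_ids, cursor = search_explorations(query, 20, cursor=cursor)
        exp_ids.extend(page_ids)
        if cursor is None:
            return exp_ids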
| {
"content_hash": "8c392c9722ec0c4c84979ab8d7356dcd",
"timestamp": "",
"source": "github",
"line_count": 1396,
"max_line_length": 84,
"avg_line_length": 41.15186246418338,
"alnum_prop": 0.661589611474725,
"repo_name": "amitdeutsch/oppia",
"id": "b84f587a70bb2901c226d743409a60e2fd9d4190",
"size": "58071",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/exp_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "54327"
},
{
"name": "HTML",
"bytes": "391175"
},
{
"name": "JavaScript",
"bytes": "1807492"
},
{
"name": "Python",
"bytes": "2045189"
},
{
"name": "Shell",
"bytes": "35261"
}
],
"symlink_target": ""
} |
import argparse
import base64
import json
import cv2
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
import scipy.misc
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
from keras.optimizers import Adam
import driving_data
# Fix error with Keras and TensorFlow
import tensorflow as tf
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
#print(image_array.shape)
#transformed_image_array=driving_data.process_image_comma_pixels(image_array)
    # Add a batch dimension and reorder from HWC to CHW (channels first),
    # the input layout this comma.ai-style model expects.
    b = image_array[None, :, :, :].transpose(0, 3, 1, 2)
    transformed_image_array = b
#print(transformed_image_array.shape)
'''
image_array = image_array[55:135, :, :]
mean=0
image_array=cv2.copyMakeBorder(image_array, top=55, bottom=25 , left=0, right=0, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean] )
#cv2.resize(image, (160,320))
b = image_array[None, :, :, :].transpose(0, 3, 1, 2)
print(b.shape)
transformed_image_array = b
'''
#transformed_image_array = image_array[None, :, :, :]
# This model currently assumes that the features of the model are just the images. Feel free to change this.
#print("about to call predict")
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
#print("after predict")
    # Convert the predicted steering angle from degrees to radians.
    steering_angle = steering_angle * scipy.pi / 180
#steering_angle = steering_angle * scipy.pi / 180
# steering_angle = steering_angle / 2
#print("steering angle"+str(steering_angle))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
speed = float(speed)
# TODO - change this
    # Speed-dependent throttle schedule: accelerate hard at low speed and
    # ease off as the car approaches cruising speed.
    if speed < 10.0:
throttle = 0.7
elif speed < 15.0:
throttle = 0.4
elif speed < 22.0:
throttle = 0.18
else:
throttle = 0.15
#throttle = 0.2
#print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
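# Illustrative refactor sketch (not part of the original script): the inline
# speed checks in telemetry() expressed as a standalone helper, convenient when
# tuning the throttle schedule. The breakpoints mirror the values used above.
def throttle_for_speed(speed):
    if speed < 10.0:
        return 0.7
    elif speed < 15.0:
        return 0.4
    elif speed < 22.0:
        return 0.18
    return 0.15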
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
# NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
# then you will have to call:
#
#model = model_from_json(json.loads(jfile.read()))
#model.summary()
#
# instead.
model = model_from_json(jfile.read())
model.summary()
learning_rate=0.0001
model.compile(Adam(lr=learning_rate), "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| {
"content_hash": "1d2765469dfbb5d80b1884bea14515ad",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 142,
"avg_line_length": 30.440944881889763,
"alnum_prop": 0.6652871184687015,
"repo_name": "alanswx/udacity-hw-cloning",
"id": "8d1f794ad8f00047b23c05e6a1f3e378a06a4f3a",
"size": "3866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commaai_drive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1938989"
},
{
"name": "Python",
"bytes": "67025"
}
],
"symlink_target": ""
} |
import sys
from contextlib import contextmanager
from PySide.QtGui import *
class Window(QWidget):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
@contextmanager
def application():
"""Enable running from within and outside host"""
if not QApplication.instance(): # or using qApp
app = QApplication(sys.argv)
yield
app.exec_()
else:
yield
with application():
window = Window()
window.show()
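# Note (illustrative, not part of the original snippet): inside a Qt-based host
# application such as Maya or Nuke, QApplication.instance() already returns the
# host's application object, so the context manager simply yields and never
# calls app.exec_(); run standalone, it creates the application and owns the
# event loop. The same pattern works for any widget, e.g.:
#
#     with application():
#         dialog = QDialog()
#         dialog.show()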
| {
"content_hash": "6f59945c5d663ab912d257d55b54e6b3",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 53,
"avg_line_length": 18.037037037037038,
"alnum_prop": 0.6344969199178645,
"repo_name": "madoodia/codeLab",
"id": "3d83d9aa09c714e3a8d24778453e65078fb4dd55",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyside/inside_outside_host_gui_with_contextmanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26584"
},
{
"name": "C#",
"bytes": "3735"
},
{
"name": "C++",
"bytes": "1041008"
},
{
"name": "CMake",
"bytes": "2191"
},
{
"name": "CSS",
"bytes": "14746"
},
{
"name": "HTML",
"bytes": "6401216"
},
{
"name": "Makefile",
"bytes": "17623"
},
{
"name": "Prolog",
"bytes": "295"
},
{
"name": "Python",
"bytes": "218348"
},
{
"name": "QML",
"bytes": "23919"
},
{
"name": "QMake",
"bytes": "1554"
},
{
"name": "Shell",
"bytes": "16371"
}
],
"symlink_target": ""
} |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class TopicsOperations(object):
"""TopicsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def list_all(
self, resource_group_name, namespace_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the topics in a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TopicResourcePaged
<azure.mgmt.servicebus.models.TopicResourcePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.TopicResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.TopicResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
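    # Illustrative usage sketch (not part of the generated client): iterating
    # the paged result returned by list_all(). The client construction is an
    # assumption -- a management client exposing these operations under
    # `.topics` -- and the resource names are placeholders.
    #
    #     client = ServiceBusManagementClient(credentials, subscription_id)
    #     for topic in client.topics.list_all('my-rg', 'my-namespace'):
    #         print(topic.name)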
def create_or_update(
self, resource_group_name, namespace_name, topic_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates a topic in the specified namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param parameters: Parameters supplied to create a topic resource.
:type parameters: :class:`TopicCreateOrUpdateParameters
<azure.mgmt.servicebus.models.TopicCreateOrUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TopicResource
<azure.mgmt.servicebus.models.TopicResource>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TopicCreateOrUpdateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TopicResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, namespace_name, topic_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a topic from the specified namespace and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The name of the topic to delete.
:type topic_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, resource_group_name, namespace_name, topic_name, custom_headers=None, raw=False, **operation_config):
"""Returns a description for the specified topic.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`TopicResource
<azure.mgmt.servicebus.models.TopicResource>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TopicResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_authorization_rules(
self, resource_group_name, namespace_name, topic_name, custom_headers=None, raw=False, **operation_config):
"""Gets authorization rules for a topic.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SharedAccessAuthorizationRuleResourcePaged
<azure.mgmt.servicebus.models.SharedAccessAuthorizationRuleResourcePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.SharedAccessAuthorizationRuleResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.SharedAccessAuthorizationRuleResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def create_or_update_authorization_rule(
self, resource_group_name, namespace_name, topic_name, authorization_rule_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates an authorizatio rule for the specified topic.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param authorization_rule_name: Authorization rule name.
:type authorization_rule_name: str
:param parameters: The shared access authorization rule.
:type parameters:
:class:`SharedAccessAuthorizationRuleCreateOrUpdateParameters
<azure.mgmt.servicebus.models.SharedAccessAuthorizationRuleCreateOrUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SharedAccessAuthorizationRuleResource
<azure.mgmt.servicebus.models.SharedAccessAuthorizationRuleResource>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'SharedAccessAuthorizationRuleCreateOrUpdateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedAccessAuthorizationRuleResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_authorization_rule(
self, resource_group_name, namespace_name, topic_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Returns the specified authorization rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param authorization_rule_name: Authorization rule name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SharedAccessAuthorizationRuleResource
<azure.mgmt.servicebus.models.SharedAccessAuthorizationRuleResource>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedAccessAuthorizationRuleResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_authorization_rule(
self, resource_group_name, namespace_name, topic_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a topic authorization rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param authorization_rule_name: Authorization rule name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_keys(
self, resource_group_name, namespace_name, topic_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Gets the primary and secondary connection strings for the topic.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ResourceListKeys
<azure.mgmt.servicebus.models.ResourceListKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}/ListKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceListKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def regenerate_keys(
self, resource_group_name, namespace_name, topic_name, authorization_rule_name, policykey=None, custom_headers=None, raw=False, **operation_config):
"""Regenerates primary or secondary connection strings for the topic.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param topic_name: The topic name.
:type topic_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param policykey: Key that needs to be regenerated. Possible values
include: 'PrimaryKey', 'SecondaryKey'
:type policykey: str or :class:`Policykey
<azure.mgmt.servicebus.models.Policykey>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ResourceListKeys
<azure.mgmt.servicebus.models.ResourceListKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.RegenerateKeysParameters(policykey=policykey)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}/regenerateKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'topicName': self._serialize.url("topic_name", topic_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RegenerateKeysParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceListKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
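    # Illustrative usage sketch (not part of the generated client): rotating a
    # topic's primary key and reading back the connection strings. The client
    # object and resource names are placeholders, and the attribute names on
    # the result are assumed to follow the ResourceListKeys model.
    #
    #     keys = client.topics.regenerate_keys(
    #         'my-rg', 'my-namespace', 'my-topic', 'RootManageSharedAccessKey',
    #         policykey='PrimaryKey')
    #     connection_string = keys.primary_connection_string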
| {
"content_hash": "3c082affb99dc654652ea67ec56e70db",
"timestamp": "",
"source": "github",
"line_count": 708,
"max_line_length": 217,
"avg_line_length": 49.259887005649716,
"alnum_prop": 0.6545762128684482,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "ac2a435d9efea59384d6c173311b63edc9f88072",
"size": "35350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-servicebus/azure/mgmt/servicebus/operations/topics_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent603xA import *
class agilent6038A(agilent603xA):
"Agilent 6038A IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '6038A')
super(agilent6038A, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P60V': (61.425, 10.2375)
},
'ovp_max': 63.0,
'voltage_max': 61.425,
'current_max': 10.2375
}
]
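# --- Usage sketch (added for illustration; not part of the original driver) ---
# A minimal example of how a python-ivi DC power supply driver such as this one
# is typically used. The VISA resource string and setpoints are assumptions, and
# the output attribute names follow the generic python-ivi dcpwr interface.
def _example_usage(resource="TCPIP0::192.168.1.100::gpib0,5::INSTR"):
    psu = agilent6038A(resource)          # opens the instrument session
    psu.outputs[0].voltage_level = 12.0   # volts, within the 61.425 V range above
    psu.outputs[0].current_limit = 1.0    # amps, within the 10.2375 A limit above
    psu.outputs[0].enabled = True
    return psu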
| {
"content_hash": "01976ffc7e8b4ef7be0af275caedbded",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 34.78,
"alnum_prop": 0.6716503737780334,
"repo_name": "Diti24/python-ivi",
"id": "5c980abfe9ca94b1ecc9fd067d7d84a1041d6aa3",
"size": "1739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilent6038A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
} |
import gzip
import logging
import os
import pathlib
import urllib.request
import pandas as pd
import requests
from google.cloud import storage
def main(
source_url: str,
source_file: pathlib.Path,
target_file: pathlib.Path,
target_gcs_bucket: str,
target_gcs_path: str,
):
logging.info("NOAA Lightning Strikes By Year process started")
if url_is_reachable(source_url):
logging.info("creating 'files' folder")
pathlib.Path("./files").mkdir(parents=True, exist_ok=True)
source_file_zipped = str(source_file) + ".gz"
source_file_unzipped = str(source_file) + ".1"
logging.info(f"Downloading source file {source_url}")
download_file(source_url, source_file_zipped)
logging.info(f"Decompressing {source_file_unzipped}")
gz_decompress(source_file_zipped, source_file_unzipped)
logging.info(f"Removing unnecessary header in {source_file_unzipped}")
os.system(f"echo 'DATE,LONGITUDE,LATITUDE,TOTAL_COUNT' > {source_file}")
os.system(f"tail -n +4 {source_file_unzipped} >> {source_file}")
os.unlink(source_file_unzipped)
os.unlink(source_file_zipped)
logging.info(f"Opening source file {source_file}")
df = pd.read_csv(str(source_file))
logging.info(f"Transform: Renaming Headers.. {source_file}")
df.columns = ["day_int", "centerlon", "centerlat", "number_of_strikes"]
logging.info(f"Converting datetime format in {source_file}")
df["day"] = (
pd.to_datetime(
(df["day_int"][:].astype("string") + "000000"), "raise", False, True
).astype(str)
+ " 00:00:00"
)
df["center_point"] = (
"POINT("
+ df["centerlon"][:].astype("string")
+ " "
+ df["centerlat"][:].astype("string")
+ ")"
)
logging.info(f"Reordering columns in {source_file}")
df = df[["day", "number_of_strikes", "center_point"]]
logging.info(f"Transform: Saving to output file.. {target_file}")
df.to_csv(target_file, index=False)
logging.info(f"completed processing {source_url}")
logging.info(
f"Uploading output file to.. gs://{target_gcs_bucket}/{target_gcs_path}"
)
upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path)
logging.info("NOAA Lightning Strikes By Year process completed")
else:
logging.info(f"Error: Unable to reach url: {source_url}")
logging.info("Process failed!")
def gz_decompress(infile: str, tofile: str) -> None:
with open(infile, "rb") as inf, open(tofile, "w", encoding="utf8") as tof:
decom_str = gzip.decompress(inf.read()).decode("utf-8")
tof.write(decom_str)
def url_is_reachable(url: str) -> bool:
request = urllib.request.Request(url)
request.get_method = lambda: "HEAD"
try:
urllib.request.urlopen(request)
return True
except urllib.request.HTTPError:
return False
def download_file(source_url: str, source_file: pathlib.Path) -> None:
r = requests.get(source_url, stream=True)
if r.status_code == 200:
with open(source_file, "wb") as f:
for chunk in r:
f.write(chunk)
else:
logging.error(f"Couldn't download {source_url}: {r.text}")
def upload_file_to_gcs(file_path: pathlib.Path, gcs_bucket: str, gcs_path: str) -> None:
storage_client = storage.Client()
bucket = storage_client.bucket(gcs_bucket)
blob = bucket.blob(gcs_path)
blob.upload_from_filename(file_path)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
source_url=os.environ["SOURCE_URL"],
source_file=pathlib.Path(os.environ["SOURCE_FILE"]).expanduser(),
target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(),
target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"],
target_gcs_path=os.environ["TARGET_GCS_PATH"],
)
| {
"content_hash": "c7fb9c64c18bf853144a7d5151bdbe3b",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 88,
"avg_line_length": 31.625,
"alnum_prop": 0.6143774703557312,
"repo_name": "llooker/public-datasets-pipelines",
"id": "2244a729da78583e092d33757187066d2b2bb598",
"size": "4624",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "datasets/noaa/pipelines/_images/run_csv_transform_kub_lightning_strikes_by_year/csv_transform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "58993"
},
{
"name": "HCL",
"bytes": "394340"
},
{
"name": "Jinja",
"bytes": "11245"
},
{
"name": "Jupyter Notebook",
"bytes": "15325"
},
{
"name": "Python",
"bytes": "2616241"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.dispatch import Signal
__all__ = ['hook', ]
class Hook(object):
"""
    A dynamic-signal dispatcher.\
    Should be used through :py:data:`hook`
    Thread-safety: it is not thread-safe; this may change\
    in the future if an RLock is added around _registry operations.\
    In the meantime, you should register/connect/disconnect\
    at import time (global scope) to ensure thread-safety;\
    doing it in the AppConfig.ready() method is safe
"""
def __init__(self):
self._registry = {}
def register(self, name):
"""
Register a new hook. Not required (see :py:func:`.connect` method)
:param str name: The hook name
:return: Django signal
:rtype: :py:class:`django.dispatch.Signal`
"""
signal = Signal(providing_args=['args', 'kwargs'])
self._registry[name] = signal
return signal
def connect(self, name, func, sender=None, dispatch_uid=None):
"""
Connects a function to a hook.\
Creates the hook (name) if it does not exists
:param str name: The hook name
:param callable func: A function reference used as a callback
:param class sender: Optional sender __class__ to which the\
func should respond. Default will match all
:param str dispatch_uid: Optional unique id,\
see :py:class:`django.dispatch.Signal` for more info
"""
try:
signal = self._registry[name]
except KeyError:
signal = self.register(name)
signal.connect(func, sender=sender, dispatch_uid=dispatch_uid)
def disconnect(self, name, func, dispatch_uid=None):
"""
Disconnects a function from a hook
:param str name: The hook name
:param callable func: A function reference registered previously
:param str dispatch_uid: optional unique id,\
see :py:class:`django.dispatch.Signal` for more info.
"""
try:
signal = self._registry[name]
except KeyError:
return
signal.disconnect(func, dispatch_uid=dispatch_uid)
def send(self, name, sender=None, **kwargs):
"""
Sends the signal. Return every function response\
that was hooked to hook-name as a list: [(func, response), ]
:param str name: The hook name
:param class sender: Optional sender __class__ to which\
registered callback should match (see :py:func:`.connect` method)
:return: Signal responses as a sequence of tuples (func, response)
:rtype: list
"""
try:
signal = self._registry[name]
except KeyError:
return []
return signal.send(sender=sender, **kwargs)
hook = Hook()
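# --- Usage sketch (added for illustration; not part of the original module) ---
# The docstrings above describe connect()/send()/disconnect(); this hypothetical
# hook name and callback show the intended flow end to end.
def _example_usage():
    def on_comment_posted(sender, **kwargs):
        # Receivers get the sender plus any keyword arguments passed to send().
        return 'comment %s handled' % kwargs.get('comment_id')

    hook.connect('comment-posted', on_comment_posted)       # registers the hook lazily
    responses = hook.send('comment-posted', comment_id=42)  # [(receiver, return value), ...]
    hook.disconnect('comment-posted', on_comment_posted)
    return responses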
| {
"content_hash": "76ea761213ce66816385ec0140d97b63",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 74,
"avg_line_length": 32.07954545454545,
"alnum_prop": 0.6110520722635494,
"repo_name": "nitely/django-hooks",
"id": "2e2017cd9bad722a7b60ed55688e91041878be9b",
"size": "2848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hooks/signalhook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37228"
}
],
"symlink_target": ""
} |
import datetime
import json
from django import forms
from django.conf import settings
import pyblog
from publisher.models import Post, KeyStore
class PostForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(PostForm, self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs['title'] = 'A catchy title'
self.fields['body'].widget.attrs['title'] = 'Pulitzer award winning text goes here, don\'t be shy'
self.fields['excerpt'].widget.attrs['title'] = 'Optionally, provide an excerpt'
self.fields['email'].widget.attrs['title'] = 'Lastly, we need your email for verification'
self.fields['wordpress_username'].widget.attrs['title'] = 'Your wordpress username'
self.fields['body'].widget.attrs['rows'] = 16
self.fields['excerpt'].widget.attrs['rows'] = 4
self.fields['categories'] = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=[], required=False)
self.fields['tags'] = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=[], required=False)
try:
settings.WORDPRESS['AUTH']['email']
except KeyError:
del(self.fields['email'])
try:
settings.WORDPRESS['AUTH']['wordpress']
self.fields['wordpress_password'] = forms.RegexField(
regex=r'^\w+$',
min_length=5,
max_length=50,
widget=forms.PasswordInput(render_value=False),
label="Wordpress Password",
error_messages={'invalid': 'This is an invalid wordpress password'},
required=False
)
try:
self.fields['email'].widget.attrs['title'] = 'Optionally, verify with your email if you don\'t have a wordpress account'
except KeyError:
pass
except KeyError:
pass
categories = []
try:
value = KeyStore.objects.get(key='blog_categories', updated_at__gte=datetime.datetime.now() - datetime.timedelta(hours=1))
categories = json.loads(value.value)
except KeyStore.DoesNotExist:
blog = pyblog.WordPress(settings.WORDPRESS['RPC_URL'], settings.WORDPRESS['USER'], settings.WORDPRESS['PASSWORD'])
categories = blog.get_categories()
value = json.dumps(categories)
try:
key = KeyStore.objects.get(key='blog_categories')
key.value = value
key.save()
except KeyStore.DoesNotExist:
key = KeyStore(key='blog_categories', value=value)
key.save()
for category in categories:
self.fields['categories'].choices.append((category['categoryName'], category['categoryName']))
tags = []
try:
value = KeyStore.objects.get(key='blog_tags', updated_at__gte=datetime.datetime.now() - datetime.timedelta(hours=1))
tags = json.loads(value.value)
except KeyStore.DoesNotExist:
blog = pyblog.WordPress(settings.WORDPRESS['RPC_URL'], settings.WORDPRESS['USER'], settings.WORDPRESS['PASSWORD'])
tags = blog.get_tags()
value = json.dumps(tags)
try:
key = KeyStore.objects.get(key='blog_tags')
key.value = value
key.save()
except KeyStore.DoesNotExist:
key = KeyStore(key='blog_tags', value=value)
key.save()
for tag in tags:
self.fields['tags'].choices.append((tag['name'], tag['name']))
class Meta:
model = Post
exclude = ['status', 'attachments']
def clean_email(self):
data = self.cleaned_data['email']
# whitelisting, we could blacklist too
if len(data):
try:
settings.WORDPRESS['AUTH']['email']['VALID_DOMAINS']
tA = data.split('@')
if tA[1] not in settings.WORDPRESS['AUTH']['email']['VALID_DOMAINS']:
raise forms.ValidationError('You must use a valid email address')
except KeyError:
pass
return data
| {
"content_hash": "8ca75dccd05012d443548fea233bbee6",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 136,
"avg_line_length": 39.81818181818182,
"alnum_prop": 0.5589041095890411,
"repo_name": "charlesmastin/django-wordpress-publisher",
"id": "1a337bfc5981b581c5f31b4184d166d3e1a5ac9e",
"size": "4380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publisher/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "31186"
},
{
"name": "Python",
"bytes": "35548"
}
],
"symlink_target": ""
} |
import logging
import config
import redis
from rq import Queue
from modules.metadata.metadata import upload_metadata
from modules.loggingFunctions import initialize_logging
# logging
log_file = initialize_logging()
log = logging.getLogger(__name__)
redis_url = config.REDIS_URL
redis_conn = redis.from_url(redis_url)
multiples_q = Queue('multiples', connection=redis_conn, default_timeout=config.DEFAULT_TIMEOUT)
def blob_meta_enqueue(csv_file):
job_meta = multiples_q.enqueue(upload_metadata, csv_file, result_ttl=-1)
log.info('JOB ID IS: ' + job_meta.get_id())
return job_meta.get_id()
| {
"content_hash": "3829037249fc1cea388352e58a9319bf",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 95,
"avg_line_length": 31.736842105263158,
"alnum_prop": 0.7562189054726368,
"repo_name": "superphy/backend",
"id": "015537b4ee1326f93f367939971c9d31ed99e07e",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/modules/meta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "392139"
}
],
"symlink_target": ""
} |
import collections
import json
import operator
from datetime import datetime
from django.conf import settings
from . import app_settings
from .exceptions import ACPCalendarException
# noinspection PyPep8Naming
def load_data(apps, schema_editor):
HolidayType = apps.get_model("acp_calendar", "HolidayType")
for holiday_type in get_holiday_type_list():
HolidayType.objects.create(**holiday_type)
# noinspection PyPep8Naming
ACPHoliday = apps.get_model("acp_calendar", "ACPHoliday")
for holiday_data in get_holidays_list():
try:
holiday_type = HolidayType.objects.get(short_name=holiday_data['holiday_type'])
# noinspection PyUnresolvedReferences
holiday_date = datetime.strptime(holiday_data['date'], app_settings.LOAD_DATE_FORMAT)
ACPHoliday.objects.get_or_create(date=holiday_date, holiday_type=holiday_type)
except HolidayType.DoesNotExist:
raise ACPCalendarException('Could not find a holiday type for %s' % holiday_data['holiday_type'])
# noinspection SpellCheckingInspection
def get_holiday_type_list():
holiday_types = [{'name': 'Año Nuevo', 'short_name': 'año_nuevo'},
{'name': 'Día de los Mártires', 'short_name': 'mártires'},
{'name': 'Martes Carnaval', 'short_name': 'martes_carnaval'},
{'name': 'Viernes Santo', 'short_name': 'viernes_santo'},
{'name': 'Día del Trabajador', 'short_name': 'día_del_trabajo'},
{'name': 'Toma de Posesión Presidencial', 'short_name': 'toma_presidencial'},
{'name': 'Día de la Separación de Panamá de Colombia', 'short_name': 'separación_colombia'},
{'name': 'Día de Colón', 'short_name': 'colón'},
{'name': 'Primer Grito de Independencia', 'short_name': 'grito_independencia'},
{'name': 'Independencia de Panamá de España', 'short_name': 'independencia_españa'},
{'name': 'Día de la Madre', 'short_name': 'día_de_la_madre'},
{'name': 'Navidad', 'short_name': 'navidad'},
]
return holiday_types
def get_holidays_dictionary():
holiday_list = get_holidays_list()
holiday_dictionary = dict()
for holiday_data in holiday_list:
year = holiday_data['date'][:4]
if year not in holiday_dictionary.keys():
holiday_dictionary[year] = list()
holiday_dictionary[year].append(holiday_data)
ordered_holidays = collections.OrderedDict(sorted(holiday_dictionary.items()))
return ordered_holidays
def get_key(object):
return object['date']
def get_holidays_list(source_json_file=None):
if source_json_file is None:
data_filename = app_settings.INITIAL_DATA_FILENAME
elif source_json_file is not None and settings.DEBUG:
data_filename = source_json_file
else:
raise ValueError('Cannot change json source')
with open(data_filename, encoding='utf-8') as json_data:
holidays_list = json.load(json_data)
ordered_holidays_list = sorted(holidays_list, key=operator.itemgetter('date'))
return ordered_holidays_list
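# --- Usage sketch (added for illustration; not part of the original module) ---
# The loaders above only read the bundled JSON fixture, so they can be exercised
# without a database; this hypothetical helper summarises holidays per year.
def _example_holidays_per_year():
    holidays_by_year = get_holidays_dictionary()
    # e.g. {'2016': 11, '2017': 11, ...} depending on the fixture contents
    return {year: len(entries) for year, entries in holidays_by_year.items()}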
| {
"content_hash": "fecc94dbcd6d985fb720dfe1b0701f81",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 854,
"avg_line_length": 53.86666666666667,
"alnum_prop": 0.7022277227722772,
"repo_name": "luiscberrocal/django-acp-calendar",
"id": "18a1f6060b8a448f3961555bfea96e7115dda9ed",
"size": "4060",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "acp_calendar/initial_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "309"
},
{
"name": "HTML",
"bytes": "15511"
},
{
"name": "Makefile",
"bytes": "1248"
},
{
"name": "Python",
"bytes": "92101"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
REQUIREMENTS = open(os.path.join(os.path.dirname(__file__), 'requirements.txt')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-trusts',
version='0.10.3',
description='Django authorization add-on for multiple organizations and object-level permission settings',
author='Thomas Yip',
author_email='[email protected]',
long_description=README,
url='http://github.com/beedesk/django-trusts',
packages=find_packages(exclude=[]),
test_suite="tests.runtests.runtests",
include_package_data=True,
zip_safe=False,
install_requires=REQUIREMENTS,
license='BSD 2-Clause',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries',
],
)
| {
"content_hash": "d4a2322f1ea9c6c0c91e768295b4ff73",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 110,
"avg_line_length": 33.75,
"alnum_prop": 0.6977777777777778,
"repo_name": "beedesk/django-trusts",
"id": "7d74ac633f768d97244f51afa13112a35b98264b",
"size": "1397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "77791"
}
],
"symlink_target": ""
} |
from . import AsyncTestCase, read_fixture, AsyncContext
from notifier import translate
from notifier.model import *
class TestGoogleTranslate(AsyncTestCase):
maxDiff = None
def setUp(self):
super().setUp()
self.client = translate.GoogleTranslateClient('API_KEY')
fixture = read_fixture('google_translate_languages.json')
self.mock_session_new.request.return_value = AsyncContext(context=self.make_response(body=fixture))
self.coro(self.client.bootstrap())
def test_translate(self):
fixture = read_fixture('google_translate.json')
expected = GoogleTranslation('Witaj świecie', 'nmt')
self.mock_session_new.request.return_value = AsyncContext(context=self.make_response(body=fixture))
actual = self.coro(self.client.translate('Hello world', 'en', 'pl'))
self.assertEqual(expected, actual)
def test_translate_fallback_to_major_locale(self):
fixture = read_fixture('google_translate.json')
self.mock_session_new.request.return_value = AsyncContext(context=self.make_response(body=fixture))
url = 'https://translation.googleapis.com/language/translate/v2/languages'
data = {'target': 'en', 'model': 'nmt', 'key': 'API_KEY'}
self.mock_session_new.request.assert_called_with('GET', url, params=data, timeout=0)
| {
"content_hash": "32343b012bd2088de4dcc00eaf40b988",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 107,
"avg_line_length": 46.41379310344828,
"alnum_prop": 0.6953937592867756,
"repo_name": "KeepSafe/translation-real-time-validaton",
"id": "6b6635ef15a10bed082c98dc65257de842301ba9",
"size": "1347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_google_translate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "464"
},
{
"name": "HTML",
"bytes": "2930"
},
{
"name": "Makefile",
"bytes": "1363"
},
{
"name": "Python",
"bytes": "81691"
}
],
"symlink_target": ""
} |
"""Helpers for batch requests to the Google Cloud Firestore API."""
from google.cloud.firestore_v1 import _helpers
class WriteBatch(object):
"""Accumulate write operations to be sent in a batch.
This has the same set of methods for write operations that
:class:`~google.cloud.firestore_v1.document.DocumentReference` does,
e.g. :meth:`~google.cloud.firestore_v1.document.DocumentReference.create`.
Args:
client (:class:`~google.cloud.firestore_v1.client.Client`):
The client that created this batch.
"""
def __init__(self, client):
self._client = client
self._write_pbs = []
self.write_results = None
self.commit_time = None
def _add_write_pbs(self, write_pbs):
"""Add `Write`` protobufs to this transaction.
This method intended to be over-ridden by subclasses.
Args:
write_pbs (List[google.cloud.proto.firestore.v1.\
write_pb2.Write]): A list of write protobufs to be added.
"""
self._write_pbs.extend(write_pbs)
def create(self, reference, document_data):
"""Add a "change" to this batch to create a document.
If the document given by ``reference`` already exists, then this
batch will fail when :meth:`commit`-ed.
Args:
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
A document reference to be created in this batch.
document_data (dict): Property names and values to use for
creating a document.
"""
write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)
self._add_write_pbs(write_pbs)
def set(self, reference, document_data, merge=False):
"""Add a "change" to replace a document.
See
:meth:`google.cloud.firestore_v1.document.DocumentReference.set` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
A document reference that will have values set in this batch.
document_data (dict):
Property names and values to use for replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
"""
if merge is not False:
write_pbs = _helpers.pbs_for_set_with_merge(
reference._document_path, document_data, merge
)
else:
write_pbs = _helpers.pbs_for_set_no_merge(
reference._document_path, document_data
)
self._add_write_pbs(write_pbs)
def update(self, reference, field_updates, option=None):
"""Add a "change" to update a document.
See
:meth:`google.cloud.firestore_v1.document.DocumentReference.update`
for more information on ``field_updates`` and ``option``.
Args:
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
A document reference that will be updated in this batch.
field_updates (dict):
Field names or paths to update and values to update with.
option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]):
A write option to make assertions / preconditions on the server
state of the document before applying changes.
"""
if option.__class__.__name__ == "ExistsOption":
raise ValueError("you must not pass an explicit write option to " "update.")
write_pbs = _helpers.pbs_for_update(
reference._document_path, field_updates, option
)
self._add_write_pbs(write_pbs)
def delete(self, reference, option=None):
"""Add a "change" to delete a document.
See
:meth:`google.cloud.firestore_v1.document.DocumentReference.delete`
for more information on how ``option`` determines how the change is
applied.
Args:
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
A document reference that will be deleted in this batch.
option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]):
A write option to make assertions / preconditions on the server
state of the document before applying changes.
"""
write_pb = _helpers.pb_for_delete(reference._document_path, option)
self._add_write_pbs([write_pb])
def commit(self):
"""Commit the changes accumulated in this batch.
Returns:
List[:class:`google.cloud.proto.firestore.v1.write_pb2.WriteResult`, ...]:
The write results corresponding to the changes committed, returned
in the same order as the changes were applied to this batch. A
write result contains an ``update_time`` field.
"""
commit_response = self._client._firestore_api.commit(
self._client._database_string,
self._write_pbs,
transaction=None,
metadata=self._client._rpc_metadata,
)
self._write_pbs = []
self.write_results = results = list(commit_response.write_results)
self.commit_time = commit_response.commit_time
return results
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.commit()
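# --- Usage sketch (added for illustration; not part of the original module) ---
# The methods above accumulate writes and commit() sends them in a single request.
# ``client`` is assumed to be an already-constructed firestore Client; collection
# and document names are invented for the example.
def _example_batched_writes(client):
    cities = client.collection("cities")
    # Used as a context manager, the batch commits automatically on a clean exit.
    with client.batch() as batch:
        batch.create(cities.document("LON"), {"name": "London"})
        batch.set(cities.document("PAR"), {"name": "Paris"}, merge=True)
        batch.delete(cities.document("OLD"))
    # Or keep a reference and call commit() explicitly to inspect the write results.
    batch = client.batch()
    batch.update(cities.document("LON"), {"population": 8900000})
    return batch.commit()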
| {
"content_hash": "21f36ec672d5a6bfdc515019e7525e85",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 88,
"avg_line_length": 39.04109589041096,
"alnum_prop": 0.6128070175438597,
"repo_name": "tswast/google-cloud-python",
"id": "56483af10c7270a509940be1e5235495fd2dbf33",
"size": "6296",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "firestore/google/cloud/firestore_v1/batch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import sys
import subprocess
import os
import time
import json
orig="original/"
convert1="convert -resize 256x256! "
convert2="convert -resize 50x50! "
train="train/"
thumb="thumbnail/"
path=sys.argv[1]
savepath=sys.argv[2]
iname=sys.argv[3]
path=path.replace(' ','\\ ')
pos=path.rfind(".")
dir=path[:pos]
ext=path[pos:]
#check file format
if ext!=".zip" and ext!=".rar":
sys.stderr.write("wrong file format,abort")
sys.exit()
#check filename
if os.path.exists(dir):
#print("Directory exists!delete original directory")
subprocess.call("rm -rf "+dir,shell=True)
if ext==".zip":
subprocess.call("mkdir "+dir,shell=True)
unzipCmd="unzip "+path+" -d "+dir
elif ext==".rar":
#todo
sys.stderr.write("rar unavailable")
sys.exit()
#print(unzipCmd)
unzip=subprocess.call(unzipCmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
#check unzip result
#print(unzip)
if unzip>1:
sys.stderr.write("unzip failed")
sys.exit()
findCmd="find "+dir+" -name \"*.*\" "
findimage=subprocess.Popen(findCmd,shell=True,stdout=subprocess.PIPE)
findimage.wait()
filelist=[]
while True:
buf=findimage.stdout.readline()
buf=buf.replace("\n","")
if buf=="":
break
if len(buf)>0:
filelist.append(buf)
#print(len(filelist))
count=0
for file in filelist:
filec=file.replace(' ','\\ ')
ext=file[file.rfind("."):].lower()
if ext!=".jpg" and ext!=".gif" and ext!=".png" and ext!=".jpeg":
sys.stderr.write("file format error,abort")
sys.exit()
if os.path.getsize(file)==0:
#print("empty file:"+file+", skip")
continue
#print(file)
newfile="%s%s"%(iname,count)
#cpCmd="cp "+filec+" "+savepath+orig+newfile+ext
#cpCmd=cpCmd.replace("\n","")
sys.stdout.write(newfile+"\n")
#copy=subprocess.call(cpCmd,shell=True)
#if copy!=0:
# subprocess.call("rm -rf "+dir,shell=True)
# sys.stderr.write("copy failed,abort")
# sys.exit()
convert=subprocess.call("convert "+filec+" "+savepath+orig+newfile+".jpg",shell=True)
if convert!=0:
subprocess.call("rm -rf "+dir,shell=True)
sys.stderr.write("convert to jpeg failed!,abort")
sys.exit()
convert=subprocess.call(convert1+filec+" "+savepath+train+newfile+".jpg",shell=True)
if convert!=0:
subprocess.call("rm -rf "+dir,shell=True)
sys.stderr.write("convert to 256x256 failed!,abort")
sys.exit()
convert=subprocess.call(convert2+filec+" "+savepath+thumb+newfile+".jpg",shell=True)
if convert!=0:
subprocess.call("rm -rf "+dir,shell=True)
sys.stderr.write("convert to 50x50 failed!,abort")
sys.exit()
count=count+1
subprocess.call("rm -rf "+dir,shell=True)
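# --- Usage note (added for illustration; not part of the original script) ---
# Invocation follows the argv order read at the top of the script; the paths and
# prefix below are hypothetical. savepath should end with a slash because it is
# concatenated directly with the original/, train/ and thumbnail/ sub-directories:
#
#   python unzip.py /tmp/photos.zip /var/www/images/ album1_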
| {
"content_hash": "f192a7f035a9605483daebb7b6e894dc",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 89,
"avg_line_length": 28.873684210526317,
"alnum_prop": 0.6419978126139264,
"repo_name": "vgene/Image-Management-NodeJS",
"id": "12b5ff17b5424abb53bc43e9d8cb4ed58e483609",
"size": "2743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/unzip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14007"
},
{
"name": "JavaScript",
"bytes": "370524"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
} |
""" S3 Person Registry, controllers
@author: nursix
"""
module = request.controller
# -----------------------------------------------------------------------------
# Options Menu (available in all Functions' Views)
def shn_menu():
response.menu_options = [
[T("Search for a Person"), False, URL(r=request, f="person", args="search_simple")],
[T("Persons"), False, URL(r=request, f="person"), [
[T("List"), False, URL(r=request, f="person")],
[T("Add"), False, URL(r=request, f="person", args="create")],
]],
[T("Groups"), False, URL(r=request, f="group"), [
[T("List"), False, URL(r=request, f="group")],
[T("Add"), False, URL(r=request, f="group", args="create")],
]]]
menu_selected = []
if session.rcvars and "pr_group" in session.rcvars:
group = db.pr_group
query = (group.id == session.rcvars["pr_group"])
record = db(query).select(group.id, group.name, limitby=(0, 1)).first()
if record:
name = record.name
menu_selected.append(["%s: %s" % (T("Group"), name), False,
URL(r=request, f="group", args=[record.id])])
if session.rcvars and "pr_person" in session.rcvars:
person = db.pr_person
query = (person.id == session.rcvars["pr_person"])
record = db(query).select(person.id, limitby=(0, 1)).first()
if record:
name = shn_pr_person_represent(record.id)
menu_selected.append(["%s: %s" % (T("Person"), name), False,
URL(r=request, f="person", args=[record.id])])
if menu_selected:
menu_selected = [T("Open recent"), True, None, menu_selected]
response.menu_options.append(menu_selected)
shn_menu()
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
try:
module_name = deployment_settings.modules[module].name_nice
except:
module_name = T("Person Registry")
def prep(jr):
if jr.representation == "html":
if not jr.id:
jr.method = "search_simple"
jr.custom_action = shn_pr_person_search_simple
else:
redirect(URL(r=request, f="person", args=[jr.id]))
return True
response.s3.prep = prep
def postp(jr, output):
if isinstance(output, dict):
gender = []
for g_opt in pr_gender_opts:
count = db((db.pr_person.deleted == False) & \
(db.pr_person.gender == g_opt)).count()
gender.append([str(pr_gender_opts[g_opt]), int(count)])
age = []
for a_opt in pr_age_group_opts:
count = db((db.pr_person.deleted == False) & \
(db.pr_person.age_group == a_opt)).count()
age.append([str(pr_age_group_opts[a_opt]), int(count)])
total = int(db(db.pr_person.deleted == False).count())
output.update(module_name=module_name, gender=gender, age=age, total=total)
if jr.representation in ("html", "popup"):
if not jr.component:
label = READ
else:
label = UPDATE
linkto = shn_linkto(jr, sticky=True)("[id]")
response.s3.actions = [
dict(label=str(label), _class="action-btn", url=str(linkto))
]
return output
response.s3.postp = postp
response.s3.pagination = True
output = shn_rest_controller("pr", "person")
response.view = "pr/index.html"
shn_menu()
return output
# -----------------------------------------------------------------------------
def person():
""" RESTful CRUD controller """
resource = request.function
def prep(r):
if r.component_name == "config":
_config = db.gis_config
defaults = db(_config.id == 1).select(limitby=(0, 1)).first()
for key in defaults.keys():
if key not in ["id", "uuid", "mci", "update_record", "delete_record"]:
_config[key].default = defaults[key]
if r.representation == "popup":
# Hide "pe_label" and "missing" fields in person popups
r.table.pe_label.readable = False
r.table.pe_label.writable = False
r.table.missing.readable = False
r.table.missing.writable = False
return True
response.s3.prep = prep
s3xrc.model.configure(db.pr_group_membership,
list_fields=["id",
"group_id",
"group_head",
"description"])
def postp(r, output):
if r.representation in ("html", "popup"):
if not r.component:
label = READ
else:
label = UPDATE
linkto = shn_linkto(r, sticky=True)("[id]")
response.s3.actions = [
dict(label=str(label), _class="action-btn", url=str(linkto))
]
return output
response.s3.postp = postp
response.s3.pagination = True
output = shn_rest_controller(module, resource,
listadd = False,
main="first_name",
extra="last_name",
rheader=lambda r: shn_pr_rheader(r,
tabs = [(T("Basic Details"), None),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Address"), "address"),
(T("Contact Data"), "pe_contact"),
(T("Memberships"), "group_membership"),
(T("Presence Log"), "presence"),
(T("Subscriptions"), "pe_subscription"),
(T("Map Settings"), "config")
]))
shn_menu()
return output
# -----------------------------------------------------------------------------
def group():
""" RESTful CRUD controller """
resource = request.function
response.s3.filter = (db.pr_group.system == False) # do not show system groups
s3xrc.model.configure(db.pr_group_membership,
list_fields=["id",
"person_id",
"group_head",
"description"])
def group_postp(jr, output):
if jr.representation in ("html", "popup"):
if not jr.component:
label = READ
else:
label = UPDATE
linkto = shn_linkto(jr, sticky=True)("[id]")
response.s3.actions = [
dict(label=str(label), _class="action-btn", url=linkto)
]
return output
response.s3.postp = group_postp
response.s3.pagination = True
output = shn_rest_controller(module, resource,
main="name",
extra="description",
rheader=lambda jr: shn_pr_rheader(jr,
tabs = [(T("Group Details"), None),
(T("Address"), "address"),
(T("Contact Data"), "pe_contact"),
(T("Members"), "group_membership")]),
deletable=False)
shn_menu()
return output
# -----------------------------------------------------------------------------
def image():
""" RESTful CRUD controller """
resource = request.function
return shn_rest_controller(module, resource)
# -----------------------------------------------------------------------------
def pe_contact():
""" RESTful CRUD controller """
resource = request.function
return shn_rest_controller(module, resource)
# -----------------------------------------------------------------------------
#def group_membership():
#""" RESTful CRUD controller """
#resource = request.function
#return shn_rest_controller(module, resource)
# -----------------------------------------------------------------------------
def pentity():
""" RESTful CRUD controller """
resource = request.function
response.s3.pagination = True
return shn_rest_controller(module, resource,
editable=False,
deletable=False,
listadd=False)
# -----------------------------------------------------------------------------
def download():
""" Download a file. """
return response.download(request, db)
# -----------------------------------------------------------------------------
def tooltip():
""" Ajax tooltips """
if "formfield" in request.vars:
response.view = "pr/ajaxtips/%s.html" % request.vars.formfield
return dict()
# -----------------------------------------------------------------------------
| {
"content_hash": "883445808b504e5d174f8f781429fd82",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 92,
"avg_line_length": 36.3515625,
"alnum_prop": 0.44315495379325165,
"repo_name": "luisibanez/SahanaEden",
"id": "12626b6b75d12949135fbeb77bfaec9d68315ed9",
"size": "9331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/pr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7006"
},
{
"name": "JavaScript",
"bytes": "24979950"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "2980976"
}
],
"symlink_target": ""
} |
import ast
import codecs
import os
import sys
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
class VersionFinder(ast.NodeVisitor):
def __init__(self):
self.version = None
def visit_Assign(self, node):
if getattr(node.targets[0], 'id', None) == '__version__':
self.version = node.value.s
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*parts):
finder = VersionFinder()
finder.visit(ast.parse(read(*parts)))
return finder.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
def gen_ref(ver, title, names):
names = ["__init__", ] + names
refdir = os.path.join(BASE_DIR, "ref")
pkg = "home"
if ver:
pkg = "%s.%s" % (pkg, ver)
refdir = os.path.join(refdir, ver)
if not os.path.exists(refdir):
os.makedirs(refdir)
idxpath = os.path.join(refdir, "index.rst")
with open(idxpath, "w") as idx:
idx.write(("%(title)s\n"
"%(signs)s\n"
"\n"
".. toctree::\n"
" :maxdepth: 1\n"
"\n") % {"title": title, "signs": "=" * len(title)})
for name in names:
idx.write(" %s\n" % name)
rstpath = os.path.join(refdir, "%s.rst" % name)
with open(rstpath, "w") as rst:
vals = {
"pkg": pkg, "name": name
}
rst.write(
"\n"
".. automodule:: %(pkg)s.%(name)s\n"
" :members:\n"
" :undoc-members:\n"
" :show-inheritance:\n" % vals)
if not on_rtd:
gen_ref("", "Home (home)", ["__main__", "config", "exceptions", "util"])
gen_ref("collect", "Collect (home.collect)", ["handlers", "loop"])
gen_ref("dash", "Dashboard (home.dash)", ["api", "models", "web"])
gen_ref("ts", "Time Series (home.ts)", ["graph", "models"])
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'home'
copyright = '2014, Dougal Matthews'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = find_version("..", "..", "home", "__init__.py")
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'homedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'home.tex', 'home Documentation', 'Dougal Matthews', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'home', 'home Documentation',
['Dougal Matthews'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index',
'home',
'home Documentation',
'Dougal Matthews',
'home',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- RTD Theme ------------------------------------------------------------
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| {
"content_hash": "4649ffa0e430a90e1bf9ec56141a6252",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 79,
"avg_line_length": 30.373887240356083,
"alnum_prop": 0.6507424775302852,
"repo_name": "d0ugal-archive/home",
"id": "09fb34184aff90ba1416bfedab74905e4aab6a1a",
"size": "10676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "316839"
},
{
"name": "JavaScript",
"bytes": "1271936"
},
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "78278"
},
{
"name": "Shell",
"bytes": "8981"
}
],
"symlink_target": ""
} |
import settings
import nltk
import os
import re
from langdetect import detect
def fix_file(year, month, day, subject, source):
date = year + "-" + month + "-" + day
try:
output_file_path = settings.DOWNLOADS_NEWS + "/final/" + subject + "/news-" + subject + "-" + year + "-" + month + "-" + day + "-final.csv"
dir = os.path.dirname(os.path.realpath(output_file_path))
os.makedirs(dir, exist_ok=True)
output_file = open(output_file_path, "w")
if source == "bing":
input_file_path = settings.DOWNLOADS_NEWS + "/bing/" + subject + "/news-" + subject + "-" + year + "-" + month + "-" + day + ".csv"
elif source == "google":
input_file_path = settings.DOWNLOADS_NEWS + "/google/" + subject + "/" + date + "/" + subject + "_old_sentences_" + date + ".txt"
input_file = open(input_file_path, "r")
if subject == "mcdonalds":
subject = "mcdonald"
for row in input_file:
row = re.sub(r'(\.)([A-Z])', r'\1 \2', row)
for sentence in nltk.sent_tokenize(row):
if len(sentence) > 140:
continue
if subject not in sentence.lower():
continue
else:
if detect(sentence) == "es":
continue
output_file.write(sentence.strip() + "\n")
except Exception as e:
print(e)
pass
source = "bing"
year = "2017"
month = "04"
first_day = 1
last_day = 30
subjects = ["coca-cola", "mcdonalds", "microsoft", "netflix", "nike", "samsung", "tesla", "the"]
# subjects = ["mcdonalds"]
for subject in subjects:
for i in range(first_day, last_day+1):
day = str(i).zfill(2)
print("Subject: " + subject + ", Day: " + day)
fix_file(year, month, day, subject, source) | {
"content_hash": "0b576cc5d4b4cdd5e5b4f4b77302621b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 147,
"avg_line_length": 30.258064516129032,
"alnum_prop": 0.5223880597014925,
"repo_name": "bromjiri/Presto",
"id": "3bc92e654d5f58c35f2d635f4ccdcfa9e9bf4f22",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler/fixer/news_fix_sentences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "243637"
},
{
"name": "Shell",
"bytes": "1066"
}
],
"symlink_target": ""
} |
import os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import create_engine
from slamon_afm.settings import Settings
engine = None
session_maker = sessionmaker()
create_session = scoped_session(session_maker)
Base = declarative_base()
def init_connection(unittest=False):
global engine, session_maker, create_session
if 'OPENSHIFT_POSTGRESQL_DB_URL' in os.environ:
engine = create_engine(os.environ['OPENSHIFT_POSTGRESQL_DB_URL'])
elif not unittest:
engine = create_engine('postgresql+psycopg2://' + Settings.database_user + ':' + Settings.database_password +
'@localhost/' + Settings.database_name)
else:
engine = create_engine('postgresql+psycopg2://' + Settings.test_database_user + ':' +
Settings.test_database_password + '@localhost/' + Settings.test_database_name)
create_session.remove()
Base.metadata.bind = engine
session_maker.configure(bind=engine)
return engine
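# --- Usage sketch (added for illustration; not part of the original module) ---
# Typical startup flow implied by the helpers above: bind the engine once, create
# the tables, then hand out thread-local sessions from the scoped factory.
def _example_bootstrap():
    engine = init_connection(unittest=True)  # uses the test database settings
    Base.metadata.create_all(engine)         # create tables for all declarative models
    session = create_session()               # scoped, thread-local session
    try:
        return session.is_active
    finally:
        create_session.remove()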
| {
"content_hash": "ad225a266e5621f262b4cfb8c0b39d80",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 117,
"avg_line_length": 37.275862068965516,
"alnum_prop": 0.6947271045328399,
"repo_name": "jjonek/slamon-agent-fleet-manager",
"id": "6080508abe5e68f969b0b642fd17c93d3a43b906",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slamon_afm/database.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5404"
},
{
"name": "Python",
"bytes": "51301"
}
],
"symlink_target": ""
} |
__author__ = 'David Maxwell <[email protected]>'
__date__ = '2012-10-31'
from ConfigParser import ConfigParser
from ifind.seeker.common_helpers import file_exists
from base_exception import Error as BaseError
class AbstractConfigReader(object):
'''
The abstract config file reader. Do NOT instantiate this class directly. The results obtained will be undefined. Extends this class for each different kind of config file you'd like to load and check. Once loaded, a derived class will contain a dictionary of settings that can be accessed using dot notation - and a types dictionary, too.
'''
settings_types = {} # A blank dictionary which contains the types for each value.
settings = {} # A blank settings for an abstract class.
    config_type_method_dict = {}  # takes the type and relates it to the appropriate ConfigParser read method
def __init__(self, filename = None):
'''
Initialises the abstract class. Calls self.__set_settings(), which repopulates self.settings and populates self.settings_types. If a valid filename is provided, we then call self.__read_settings_from_file() to open the file and read the configuration settings from there.
'''
self.__config_parser = ConfigParser()
self.__set_settings(self.settings) # Referring to our settings
self.__config_type_method_dict = {
str : self.__config_parser.get,
bool : self.__config_parser.getboolean,
int : self.__config_parser.getint,
float : self.__config_parser.getfloat
}
if filename is not None:
if not file_exists(filename): # File doesn't exist!
raise IOError("Could not find the specified configuration file, %s." % (filename))
self.__read_settings_from_file(filename) # File does exist; attempt to read.
def __set_settings(self, settings):
'''
Populates the two dictionaries self.settings_types and self.settings based upon the initial dictionary self.settings defined in a concrete class. Resolves each of the tuples in this initial dictionary, setting self.settings to the default values.
'''
for section, values in settings.items():
nested_types = self.DictDotNotation()
nested_settings = self.DictDotNotation()
for value_name, value_tuple in values.items():
# tuple[0] == type, tuple[1] == default value
nested_types[value_name] = value_tuple[0]
nested_settings[value_name] = value_tuple[1]
# Appends our DictDotNotation dictionaries to the relevant parent dictionaries.
self.settings_types[section] = nested_types
self.settings[section] = nested_settings
# Convert our dictionaries to DictDotNotNotation so we can access them using dots!
self.settings_types = self.DictDotNotation(self.settings_types)
self.settings = self.DictDotNotation(self.settings)
def __read_settings_from_file(self, filename):
'''
Reads settings from the specified config file. Replaces the defaults in self.settings.
If a setting does not exist in the config file which does not have a default value, a BadConfigError exception is thrown.
'''
self.__config_parser.read(filename) # Opens the ConfigParser
for section, values in self.settings.items():
for value_name, default_value in values.items():
value_type = self.settings_types[section][value_name]
config_file_value = None
if self.__config_parser.has_option(section, value_name): # If the section/value combo exists within the config file, we want to pull its value out using the appropriate getter method (based on the expected type). If the value doesn't match the expected type, a ValueError exception will be thrown.
if value_type in self.__config_type_method_dict:
config_file_value = self.__config_type_method_dict[value_type](section, value_name)
else:
raise BadConfigError("Specified type '%s' cannot be used." % (str(value_type)))
if default_value == None and config_file_value == None: # No value found in the config file, and no default provided - we cannot continue!
raise BadConfigError(
"The required value '%s' in section '%s' of config file '%s' was not specified."
% (value_name, section, filename))
if config_file_value != None: # If a default was supplied, and the config file provides a new value, we replace the existing with this one.
self.settings[section][value_name] = config_file_value
def add_settings_group(self, group_name):
'''
Adds a new group to the settings dictionary. If the group name specified by parameter group_name already exists, this method simply returns False. Otherwise, the group is added to the settings dictionary, and True is returned to indicate a successful addition.
'''
if group_name not in self.settings:
new_dict = self.DictDotNotation()
self.settings[group_name] = new_dict
return True
return False # Group already exists; return False to show addition failed
def save(self, filename):
'''
Saves the settings dictionary to a file, specified by parameter filename. If the file exists, any contents within the file will be overwritten.
'''
for section_name, section_values in self.settings.items():
values_dict = {}
# Loop through each section's values, and check if they are not None (undefined) - if not, add them to temporary dictionary values_dict.
for value_name, value in section_values.items():
if value is not None:
values_dict[value_name] = value
# Using the temporary dictionary, check if the section has any values - if so, we add the section to the ConfigParser and add the associated values.
if len(values_dict) > 0:
self.__config_parser.add_section(section_name)
for value_name, value in values_dict.items():
self.__config_parser.set(section_name, value_name, value)
# Open the new file for writing, write to it, and close cleanly.
file_object = open(filename, 'w')
self.__config_parser.write(file_object)
file_object.close()
def print_params(self):
'''
Prints each parameter from a new instance of the calling class.
'''
settings = self.settings
for section, values in settings.items():
for value_name, value in values.items():
print "%s\t%s\t%s" % (section, value_name, value)
class DictDotNotation(dict):
'''
A class which extends the Python native dict object to allow the access of elements using dot notation. Based on code from http://parand.com/say/index.php/2008/10/24/python-dot-notation-dictionary-access/
'''
def __getattr__(self, attr):
return self.get(attr, None)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class BadConfigError(BaseError):
'''
Exception class used for the identification of a seekiir config error.
'''
pass | {
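# --- Usage sketch (added for illustration; not part of the original module) ---
# The class docstring above says to extend AbstractConfigReader for each kind of
# config file; a hypothetical minimal reader could look like this. Each entry is
# a (type, default) tuple, and a default of None marks the value as required.
class ExampleConfigReader(AbstractConfigReader):
    settings = {
        'server': {
            'host': (str, 'localhost'),
            'port': (int, 8080),
            'debug': (bool, False),
        },
    }
# reader = ExampleConfigReader('example.ini')  # raises IOError if example.ini is missing
# print reader.settings.server.port            # dot access via DictDotNotation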
"content_hash": "c383a0245d05e5d3bb9705d283a41aca",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 342,
"avg_line_length": 52.76056338028169,
"alnum_prop": 0.6457554725040042,
"repo_name": "leifos/ifind",
"id": "fd0bfa80d5d632723022c07ee8fa3a448bb693cb",
"size": "7626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ifind/seeker/abstract_config_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "240161"
},
{
"name": "Shell",
"bytes": "392"
}
],
"symlink_target": ""
} |
import os
def genconfig() :
from SCons.Script import SCons
PreProcessor = SCons.cpp.PreProcessor()
    try:
        f = open('rtconfig.h', 'r')
        contents = f.read()
        f.close()
    except IOError:
        # Without rtconfig.h there is nothing to parse; bail out early instead of
        # failing later with an undefined 'contents'.
        print("Open rtconfig.h file failed.")
        return
PreProcessor.process_contents(contents)
options = PreProcessor.cpp_namespace
    try:
        f = open('.config', 'w')
        for (opt, value) in options.items():
            if type(value) == type(1):
                f.write("CONFIG_%s=%d\n" % (opt, value))
            elif type(value) == type('') and value == '':
                f.write("CONFIG_%s=y\n" % opt)
            elif type(value) == type(''):
                f.write("CONFIG_%s=%s\n" % (opt, value))
        print("Generate .config done!")
        f.close()
    except IOError:
        print("Generate .config file failed.")
| {
"content_hash": "e31e4ff6dd929d05b9e3e0b7484c244a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 56,
"avg_line_length": 26.6875,
"alnum_prop": 0.5175644028103045,
"repo_name": "yongli3/rt-thread",
"id": "de8681e1f8804343294f6c8308167bb0dd7ae336",
"size": "854",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/genconf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "11333158"
},
{
"name": "Batchfile",
"bytes": "11246"
},
{
"name": "C",
"bytes": "531459994"
},
{
"name": "C++",
"bytes": "4923297"
},
{
"name": "CMake",
"bytes": "23011"
},
{
"name": "CSS",
"bytes": "9978"
},
{
"name": "DIGITAL Command Language",
"bytes": "13234"
},
{
"name": "GDB",
"bytes": "11796"
},
{
"name": "HTML",
"bytes": "4369259"
},
{
"name": "Lex",
"bytes": "7026"
},
{
"name": "Logos",
"bytes": "7078"
},
{
"name": "M4",
"bytes": "17515"
},
{
"name": "Makefile",
"bytes": "256896"
},
{
"name": "Module Management System",
"bytes": "1548"
},
{
"name": "Objective-C",
"bytes": "4097279"
},
{
"name": "PAWN",
"bytes": "1427"
},
{
"name": "Perl",
"bytes": "6931"
},
{
"name": "Python",
"bytes": "983462"
},
{
"name": "RPC",
"bytes": "14162"
},
{
"name": "Roff",
"bytes": "4486"
},
{
"name": "Ruby",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "407071"
},
{
"name": "TeX",
"bytes": "3113"
},
{
"name": "Yacc",
"bytes": "16084"
}
],
"symlink_target": ""
} |
if __name__ == '__main__':
import argparse
import os
from ard.main import ARD, readInput
# Set up parser for reading the input filename from the command line
parser = argparse.ArgumentParser(description='Automatic Reaction Discovery')
parser.add_argument('file', type=str, metavar='infile', help='An input file describing the job options')
args = parser.parse_args()
# Read input file
input_file = os.path.abspath(args.file)
kwargs = readInput(input_file)
# Set output directory
output_dir = os.path.abspath(os.path.dirname(input_file))
kwargs['output_dir'] = output_dir
# Execute job
ard = ARD(**kwargs)
ard.execute(**kwargs)
| {
"content_hash": "4282143cf525f4caffcc39a5f3e1f255",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 108,
"avg_line_length": 31.636363636363637,
"alnum_prop": 0.6752873563218391,
"repo_name": "cgrambow/AutomaticReactionDiscovery",
"id": "cd89237990ba996c494a1c9b712af1d5cfd244c8",
"size": "744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183201"
}
],
"symlink_target": ""
} |
import sys
import svm
import svmutil
from svmutil import *
import os
import time
import mygrid
from multiprocessing import Pool
from scipy import *
from numpy import *
import matplotlib
#matplotlib.use("Agg")
from pylab import *
from matplotlib.backends.backend_pdf import PdfPages
#from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
#from pylab import plot,subplot,axis,stem,show,figure
def load_data(fn,use_specific_fold_inds):
output = []
fieldnames = [];
numofcols = 0
numofrows = 0
for line in open(fn,'r').readlines():
if line[0] == '#':
fieldnames = line[1:].rstrip().rsplit(',')
# fieldnames = fieldnames[:-1]
continue
parts = line.rstrip().rsplit()
if parts[-1][0]=='#':
numofcols = max(numofcols,int(parts[-2].rsplit(':')[0]))
else:
numofcols = max(numofcols,int(parts[-1].rsplit(':')[0]))
numofrows += 1
input = zeros([numofrows,numofcols],dtype='float64')
fold_inds = []
rowind = 0
for line in open(fn,'r').readlines():
if line[0] == '#':
continue
parts = line.rstrip().rsplit()
output.append(float(parts[0]))
if parts[-1][0] == '#':
if use_specific_fold_inds:
fold_inds.append(int(parts[-1][1:]))
for keyval in parts[1:-1]:
key,val = keyval.rsplit(':')
if val == 'nan':
input[rowind,int(key)-1] = nan;
else:
input[rowind,int(key)-1] = float(val)
else:
for keyval in parts[1:]:
key,val = keyval.rsplit(':')
if val == 'nan':
input[rowind,int(key)-1] = nan;
else:
input[rowind,int(key)-1] = float(val)
rowind += 1
return output,input,fieldnames,fold_inds
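# Expected input format (inferred from the parser above; shown as a sketch):
#   # feature_one,feature_two,feature_three   <- optional '#'-prefixed field-name header
#   1.0 1:0.5 2:nan 3:7.2 #0                  <- label, 1-based index:value pairs, optional '#fold' id
#   -1.0 1:0.1 3:2.4 #1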
class Timer():
def __enter__(self): self.start = time.time()
def __exit__(self, *args): print 'Entire Parameter Searching Experiment took %d seconds' % (time.time() - self.start)
def binarize_output(output,threshold,threshold_type):
newoutput = []
if threshold_type == 'percentile':
n = len(output)
temp = sorted(output)
        boundary = temp[int(n*(1-threshold))]  # list index must be an int; n*(1-threshold) is a float
newoutput = [1 if x > boundary else -1 for x in output]
else:
newoutput = [1 if x > threshold else -1 for x in output]
boundary = threshold
return newoutput,boundary
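# Quick sketch of binarize_output (illustration only; assumes the integer index fix above):
#   binarize_output([1.0, 2.0, 3.0, 4.0], 2.5, 'absolute')    -> ([-1, -1, 1, 1], 2.5)
#   binarize_output([1.0, 2.0, 3.0, 4.0], 0.5, 'percentile')  -> ([-1, -1, -1, 1], 3.0)
# In percentile mode the boundary is the sorted value at index int(n*(1-threshold));
# only values strictly greater than the boundary map to +1.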
def save_scale_data(fn,maxinput,mininput):
finput = open(fn,'w')
for ind in xrange(len(maxinput)):
print >> finput, '%g, %g' % (mininput[ind],maxinput[ind])
finput.close()
def save_zscore_data(fn,means,stds):
finput = open(fn,'w')
for ind in xrange(len(means)):
print >> finput, '%g, %g' % (means[ind],stds[ind])
finput.close()
def main(args):
paramsfn = args[0]
exec(open(paramsfn,'r').read())
if len(args) > 1:
gammarange = [float(args[1])]
crange = [float(args[2])]
output,input,fieldnames,fold_inds = load_data(datafilename,use_specific_fold_inds)
sep_validation = False
if separate_validation_set != '':
output_valid,input_valid,fieldnames,fold_inds_valid = load_data(separate_validation_set,use_specific_fold_inds)
sep_validation = True
fold_start = [-1]
if sep_validation:
fold_start_valid = [-1]
if use_specific_fold_inds:
unique_fold_ids = unique(fold_inds)
row_inds = []
outputcopy = []
inputcopy = zeros([size(input,0),size(input,1)],dtype='float64')
fold_start = [0]
curind = 0
for ind in unique_fold_ids:
row_inds = [i for i in xrange(len(fold_inds)) if fold_inds[i] == ind]
inputcopy[curind:curind+len(row_inds),:] = input[row_inds,:]
outputcopy.extend([output[i] for i in row_inds])
curind += len(row_inds)
fold_start.append(fold_start[-1]+len(row_inds))
input = inputcopy
output = outputcopy
nf = len(fold_start)-1
if sep_validation:
unique_fold_ids_valid = unique(fold_inds_valid)
row_inds = []
outputcopy = []
inputcopy = zeros([size(input_valid,0),size(input_valid,1)],dtype='float64')
fold_start_valid = [0]
curind = 0
for ind in unique_fold_ids_valid:
row_inds = [i for i in xrange(len(fold_inds_valid)) if fold_inds_valid[i] == ind]
inputcopy[curind:curind+len(row_inds),:] = input_valid[row_inds,:]
outputcopy.extend([output_valid[i] for i in row_inds])
curind += len(row_inds)
fold_start_valid.append(fold_start_valid[-1]+len(row_inds))
input_valid = inputcopy
output_valid = outputcopy
nf = len(fold_start_valid)-1
if binarizeoutput:
output,boundary = binarize_output(output,binary_threshold,binary_boundary_type)
if testdatafilename != '':
output_test,input_test,fieldnames,fold_inds_test = load_data(testdatafilename,False)
if binarizeoutput:
output_test = [1 if x > boundary else -1 for x in output_test]
if doscale:
maxinput = input.max(0);
mininput = input.min(0);
input = (input-mininput)/(maxinput-mininput)
if testdatafilename != '':
input_test = (input_test-mininput)/(maxinput-mininput)
if savemodel:
save_scale_data(datafilename+'_scales.dat',maxinput,mininput)
if sep_validation:
input_valid = (input_valid-mininput)/(maxinput-mininput)
if donormalize:
means = input.mean(0)
stds = sqrt(input.var(0))
input = (input-means)/stds
if testdatafilename != '':
input_test = (input_test-means)/stds
if savemodel:
save_zscore_data(datafilename+'_meansstdevs.dat',means,stds)
if sep_validation:
input_valid = (input_valid-means)/stds
if numcpus == 'auto':
p = Pool()
else:
p = Pool(numcpus)
if choose_specific_features:
if choose_specific_features_increasing:
specific_selected_features = [specific_selected_features[:i] for i in xrange(2,len(specific_selected_features),2)]
for specific_selected_choice in specific_selected_features:
inputfiltered = input[:,specific_selected_choice]
if sep_validation:
inputfiltered_valid = input_valid[:,specific_selected_choice]
if dopca:
coeff,temp,latent = princomp(inputfiltered)
if savemodel:
save_pca_coeffs(datafilename+'_pcacoeffs.dat',coeff,mean(inputfiltered.T,axis=1))
inputfiltered = temp
if sep_validation:
return
with Timer():
if sep_validation:
if use_specific_fold_inds:
results = mygrid.grid_classify_sepvalid (crange,gammarange,output,[list(x) for x in inputfiltered],output_valid,[list(x) for x in inputfiltered_valid],nf,useprob,timeout,p,fold_start,fold_start_valid)
else:
results = mygrid.grid_classify_sepvalid (crange,gammarange,output,[list(x) for x in inputfiltered],output_valid,[list(x) for x in inputfiltered_valid],nf,useprob,timeout,p)
else:
if use_specific_fold_inds:
results = mygrid.grid_classify (crange,gammarange,output,[list(x) for x in inputfiltered],nf,useprob,timeout,p,fold_start)
else:
results = mygrid.grid_classify (crange,gammarange,output,[list(x) for x in inputfiltered],nf,useprob,timeout,p)
param = svm.svm_parameter('-c %g -g %g -b %d' % (results[-2],results[-1],int(useprob)))
prob = svm.svm_problem(output, [list(x) for x in inputfiltered])
fold_start_p = (c_int *len(fold_start))()
for i in xrange(len(fold_start)):
fold_start_p[i] = fold_start[i]
if posclass == 'auto':
posclass = output[0]
if sep_validation:
prob_valid = svm.svm_problem(output_valid, [list(x) for x in inputfiltered_valid])
testlength = prob_valid.l
fold_start_p_valid = (c_int *len(fold_start_valid))()
for i in xrange(len(fold_start_valid)):
fold_start_p_valid[i] = fold_start_valid[i]
else:
testlength = prob.l
target = (c_double * testlength)()
#[maxauc,maxoptacc,maxphi,minfpfnration,maxf1,optbias,optc,optgamma]
if sep_validation:
libsvm.svm_cross_validation_sepsets(prob, prob_valid,fold_start_p, fold_start_p_valid,param, nf, target)
else:
libsvm.svm_cross_validation(prob, fold_start_p, param, nf, target)
if sep_validation:
ys = prob_valid.y[:testlength]
else:
ys = prob.y[:prob.l]
db = array([[ys[i],target[i]] for i in range(testlength)])
neg = len([x for x in ys if x != posclass])
pos = testlength-neg;
if len(specific_selected_features) == 1 or True:
pdfpages = PdfPages('%s_train.pdf' % (outputlog))
# auc,topacc,optaccbias,topphi,optphibias,top_tps_bias,top_fps = mygrid.calc_AUC(db,neg,pos,posclass,useprob,[],True,pdfpages,'Optimal Cross-Validation ROC curve')
topacc,topphi,minfpfnratio,topf1,auc,optbias = mygrid.optimize_results(db,neg,pos,posclass,'F1')
print [topacc,results[1]]
print [topphi,results[2]]
print [topf1,results[4]]
print [auc,results[0]]
pdfpages.close()
# print target
if sep_validation:
ACC,PHI,confusionmatrix = mygrid.evaluations_classify(output_valid,target,posclass,results[-3])
else:
ACC,PHI,confusionmatrix = mygrid.evaluations_classify(output,target,posclass,results[-3])
if posclass == 1:
negclass = 0;
else:
negclass = 1;
numpred_pos = confusionmatrix[0,0]+confusionmatrix[1,0]
numpred_neg = confusionmatrix[0,1]+confusionmatrix[1,1]
N = pos+neg
probchance = (numpred_pos*pos+numpred_neg*neg)*1.0/(N*N)
kappa = (topacc-probchance)*1.0/(1-probchance);
print 'Train optimized accuracy = %g' % (topacc)
print 'Train optimized Phi statistic = %g' % (topphi)
print 'Train optimized kappa = %g' % (kappa)
print 'Train optimized F1 score = %f' % (topf1)
print 'Train optimized TP/RECALL = %g, FP = %g, PRECISION = %g' % (confusionmatrix[0,0]/pos,confusionmatrix[1,0]/neg,confusionmatrix[0,0]/(confusionmatrix[0,0]+confusionmatrix[1,0]))
print '================================'
print '|| ||%6d |%6d | ||' % (posclass,negclass)
print '================================'
print '||%3d||%6g |%6g |%6g ||' % (posclass,confusionmatrix[0,0],confusionmatrix[0,1],pos)#confusionmatrix[0,0]+confusionmatrix[0,1])
print '||%3d||%6g |%6g |%6g ||' % (negclass,confusionmatrix[1,0],confusionmatrix[1,1],neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '||----------------------------||'
print '|| ||%6g |%6g |%6g ||' % (confusionmatrix[0,0]+confusionmatrix[1,0],confusionmatrix[0,1]+confusionmatrix[1,1],pos+neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '================================'
else:
auc,topacc,optaccbias,topphi,optphibias,top_tps_bias,top_fps = mygrid.calc_AUC(db,neg,pos,posclass,useprob,[],False,0,'Optimal Cross-Validation ROC curve')
print 'Optimal gamma = %g\nOptimal c = %g\nOptimal Bias = %g' % (results[-1],results[-2],results[-3])
print 'Top CV results: AUC = %g, OPTIMIZED ACC = %g, OPTIMIZED PHI = %g' % (auc,topacc,topphi)
if outputlog != '':
fout = open(outputlog,'a')
print >> fout, '========================='
print >> fout, datafilename
print >> fout, doscale, donormalize, dopca, '(scale/norm/pca)'
print >> fout, crange[0],crange[-1], gammarange[0], gammarange[-1], '(cs,gammas)'
print >> fout, use_specific_fold_inds, nf, '(use specific folds, numfold)'
print >> fout, 'SPECIFIC FIELDS:'
print >> fout, specific_selected_choice
if fieldnames != []:
for i in specific_selected_choice:
print >> fout, fieldnames[i],
print >> fout
print >> fout, 'train: '
print >> fout, ' AUC=%g,ACC=%g,kappa=%g,phi=%g,f1=%g (g=%g,c=%g,bias=%g)' % (auc,topacc,kappa,topphi,topf1,results[-1],results[-2],results[-3])
print >> fout, ' ||%3d||%6g |%6g |%6g ||' % (posclass,confusionmatrix[0,0],confusionmatrix[0,1],pos)#confusionmatrix[0,0]+confusionmatrix[0,1])
print >> fout, ' ||%3d||%6g |%6g |%6g ||' % (negclass,confusionmatrix[1,0],confusionmatrix[1,1],neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
fout.close()
if outputpredictions:
fout = open(predictionslog,'w')
if sep_validation:
for ind in xrange(len(output_valid)):
label = output_valid[ind]
value = target[ind]
oneinputrow = input_valid[ind,:]
print >> fout, value, label,
for j in xrange(len(oneinputrow)):
print >> fout, '%d:%f' % (j+1,oneinputrow[j]),
print >> fout
else:
for ind in xrange(len(output)):
label = output[ind]
value = target[ind]
oneinputrow = input[ind,:]
print >> fout, value, label,
for j in xrange(len(oneinputrow)):
print >> fout, '%d:%f' % (j+1,oneinputrow[j]),
print >> fout
fout.close()
del target
if savemodel:
param = ('-c %g -g %g -b %d' % (results[-2],results[-1],int(useprob)))
m = svm_train(output,[list(x) for x in inputfiltered],param)
svm_save_model(datafilename + '.model',m)
if testdatafilename != '':
inputfiltered_test = input_test[:,specific_selected_choice]
if dopca:
M = (inputfiltered_test-mean(inputfiltered_test.T,axis=1)).T # subtract the mean (along columns)
inputfiltered_test = dot(coeff.T,M).T # projection of the data in the new space
param = ('-c %g -g %g -b %d' % (results[-2],results[-1],int(useprob)))
m = svm_train(output,[list(x) for x in inputfiltered],param)
pred_labels, (ACC, MSE, SCC), pred_values = svm_predict(output_test,[list(x) for x in inputfiltered_test],m,'-b %d' % (int(useprob)))
ACC,PHI,confusionmatrix = mygrid.evaluations_classify(output_test, [x[0] for x in pred_values],posclass,results[-3])
db = array([[output_test[i],pred_values[i][0]] for i in range(len(output_test))])
neg = len([x for x in output_test if x != posclass])
pos = len(output_test)-neg
auctest = 0
if neg != 0 and pos != 0:
auctest,topacctest,optaccbias,topphitest,optphibias,top_tps_bias,top_fps = mygrid.calc_AUC(db,neg,pos,posclass,useprob,[],False,pdfpages,'Test ROC curve',results[-3])
numpred_pos = confusionmatrix[0,0]+confusionmatrix[1,0]
numpred_neg = confusionmatrix[0,1]+confusionmatrix[1,1]
N = pos+neg
probchance = (numpred_pos*pos+numpred_neg*neg)*1.0/(N*N)
testkappa = (ACC/100.0-probchance)*1.0/(1-probchance);
print 'Test optimized accuracy = %g' % (ACC)
print 'Test optimized Phi statistic = %g' % (PHI)
print 'Test optimized kappa = %g' % (testkappa)
print '================================'
print '|| ||%6d |%6d | ||' % (m.get_labels()[0],m.get_labels()[1])
print '================================'
print '||%3d||%6g |%6g |%6g ||' % (m.get_labels()[0],confusionmatrix[0,0],confusionmatrix[0,1],pos)#confusionmatrix[0,0]+confusionmatrix[0,1])
print '||%3d||%6g |%6g |%6g ||' % (m.get_labels()[1],confusionmatrix[1,0],confusionmatrix[1,1],neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '||----------------------------||'
print '|| ||%6g |%6g |%6g ||' % (confusionmatrix[0,0]+confusionmatrix[1,0],confusionmatrix[0,1]+confusionmatrix[1,1],pos+neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '================================'
if outputlog != '':
fout = open(outputlog,'a')
print >> fout, 'test: '
print >> fout, ' ACC=%g,AUC=%g,kappa=%g,phi=%g' % (ACC,auctest,testkappa,PHI)
print >> fout, ' ||%3d||%6g |%6g |%6g ||' % (m.get_labels()[0],confusionmatrix[0,0],confusionmatrix[0,1],pos)#confusionmatrix[0,0]+confusionmatrix[0,1])
print >> fout, ' ||%3d||%6g |%6g |%6g ||' % (m.get_labels()[1],confusionmatrix[1,0],confusionmatrix[1,1],neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
fout.close()
else:
with Timer():
if use_specific_fold_inds:
results = mygrid.grid_classify (crange,gammarange,output,[list(x) for x in input],nf,useprob,timeout,p,fold_start)
else:
results = mygrid.grid_classify (crange,gammarange,output,[list(x) for x in input],nf,useprob,timeout,p)
param = svm.svm_parameter('-c %g -g %g -b %d' % (results[-2],results[-1],int(useprob)))
prob = svm.svm_problem(output, [list(x) for x in input])
target = (c_double * prob.l)()
fold_start_p = (c_int *len(fold_start))()
for i in xrange(len(fold_start)):
fold_start_p[i] = fold_start[i]
if posclass == 'auto':
posclass = output[0]
libsvm.svm_cross_validation(prob, fold_start_p, param, nf, target)
ys = prob.y[:prob.l]
db = [[ys[i],target[i]] for i in range(prob.l)]
db = array(db)
neg = len([x for x in ys if x != posclass])
pos = prob.l-neg;
pdfpages = PdfPages('%s_train.pdf' % (outputlog))
auc,topacc,optaccbias,topphi,optphibias,top_tps_bias,top_fps = mygrid.calc_AUC(db,neg,pos,posclass,useprob,[],True,pdfpages,'Optimal Cross-Validation ROC curve')
pdfpages.close()
ACC,PHI,confusionmatrix = mygrid.evaluations_classify(output, target,posclass,results[-3])
if posclass == 1:
negclass = 0;
else:
negclass = 1;
print 'Train optimized accuracy = %g' % (topacc)
        print 'Train optimized phi statistic = %g' % (topphi)
print 'TP/RECALL = %g, FP = %g, PRECISION = %g' % (confusionmatrix[0,0]/pos,confusionmatrix[1,0]/neg,confusionmatrix[0,0]/(confusionmatrix[0,0]+confusionmatrix[1,0]))
print '================================'
print '|| ||%6d |%6d | ||' % (posclass,negclass)
print '================================'
print '||%3d||%6g |%6g |%6g ||' % (posclass,confusionmatrix[0,0],confusionmatrix[0,1],pos)#confusionmatrix[0,0]+confusionmatrix[0,1])
print '||%3d||%6g |%6g |%6g ||' % (negclass,confusionmatrix[1,0],confusionmatrix[1,1],neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '||----------------------------||'
print '|| ||%6g |%6g |%6g ||' % (confusionmatrix[0,0]+confusionmatrix[1,0],confusionmatrix[0,1]+confusionmatrix[1,1],pos+neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '================================'
if outputpredictions:
fout = open(predictionslog,'w')
for ind in xrange(len(output)):
label = output[ind]
value = target[ind]
oneinputrow = input[ind,:]
print >> fout, value, label,
for j in xrange(len(oneinputrow)):
print >> fout, '%d:%f' % (j+1,oneinputrow[j]),
print >> fout
fout.close()
del target
print 'Optimal gamma = %g\nOptimal c = %g\nOptimal Bias = %g' % (results[-1],results[-2],optphibias)
print 'Top CV results: AUC = %g, OPTIMIZED ACC = %g, OPTIMIZED PHI = %g' % (auc,topacc,topphi)
if savemodel:
param = ('-c %g -g %g -b %d' % (results[-2],results[-1],int(useprob)))
m = svm_train(output,[list(x) for x in input],param)
svm_save_model(datafilename+'.model',m)
if testdatafilename != '':
param = ('-c %g -g %g -b %d' % (results[-2],results[-1],int(useprob)))
m = svm_train(output,[list(x) for x in input],param)
pred_labels, (ACC, MSE, SCC), pred_values = svm_predict(output_test,[list(x) for x in input_test],m,'-b %d' % (int(useprob)))
ACC,PHI,confusionmatrix = mygrid.evaluations_classify(output_test, [x[0] for x in pred_values],posclass,results[-3])
db = array([[output_test[i],pred_values[i][0]] for i in range(len(output_test))])
neg = len([x for x in output_test if x != posclass])
pos = len(output_test)-neg;
pdfpages = PdfPages('%s_test.pdf' % (outputlog))
auctest = 0
if neg != 0 and pos != 0:
auctest,topacctest,optaccbias,topphitest,optphibias,top_tps_bias,top_fps = mygrid.calc_AUC(db,neg,pos,posclass,useprob,[],True,pdfpages,'Test ROC curve',results[-3])
pdfpages.close()
print 'Test accuracy = %g' % (ACC)
print 'Test Phi statistic = %g' % (PHI)
print 'TP/RECALL = %g, FP = %g, PRECISION = %g' % (confusionmatrix[0,0]/pos,confusionmatrix[1,0]/neg,confusionmatrix[0,0]/(confusionmatrix[0,0]+confusionmatrix[1,0]))
print '================================'
print '|| ||%6d |%6d | ||' % (m.get_labels()[0],m.get_labels()[1])
print '================================'
print '||%3d||%6g |%6g |%6g ||' % (m.get_labels()[0],confusionmatrix[0,0],confusionmatrix[0,1],pos)#confusionmatrix[0,0]+confusionmatrix[0,1])
print '||%3d||%6g |%6g |%6g ||' % (m.get_labels()[1],confusionmatrix[1,0],confusionmatrix[1,1],neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '||----------------------------||'
print '|| ||%6g |%6g |%6g ||' % (confusionmatrix[0,0]+confusionmatrix[1,0],confusionmatrix[0,1]+confusionmatrix[1,1],pos+neg)#confusionmatrix[1,0]+confusionmatrix[1,1])
print '================================'
if outputlog != '':
fout = open(outputlog,'a')
print >> fout, '========================='
print >> fout, fieldnames
print >> fout, 'train: AUC=%g,ACC=%g,PHI=%g (g=%g,c=%g,bias=%g)' % (auc,topacc,topphi,results[-1],results[-2],results[-3])
if testdatafilename != '':
print >> fout, 'test: ACC=%g,AUC=%g,PHI=%g' % (ACC,auctest,PHI)
fout.close()
def princomp(A):
""" performs principal components analysis
(PCA) on the n-by-p data matrix A
Rows of A correspond to observations, columns to variables.
Returns :
coeff :
is a p-by-p matrix, each column containing coefficients
for one principal component.
score :
the principal component scores; that is, the representation
of A in the principal component space. Rows of SCORE
correspond to observations, columns to components.
latent :
a vector containing the eigenvalues
of the covariance matrix of A.
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A-mean(A.T,axis=1)).T # subtract the mean (along columns)
[latent,coeff] = linalg.eig(cov(M))
score = dot(coeff.T,M).T # projection of the data in the new space
return coeff,score,latent
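# Minimal princomp usage sketch (illustration only, not from the original script):
#   A = array([[2.5, 2.4], [0.5, 0.7], [2.2, 2.9], [1.9, 2.2]])
#   coeff, score, latent = princomp(A)
#   # coeff  : 2x2 matrix of eigenvectors (one principal component per column)
#   # score  : 4x2 projection of the mean-centred rows of A onto those components
#   # latent : eigenvalues of the covariance matrix (variance captured per component)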
def save_pca_coeffs(fn,coeff,means):
fout = open(fn,'w')
for i in xrange(size(coeff,0)):
for j in xrange(size(coeff,1)):
print >> fout, coeff[i,j],
print >> fout
for i in xrange(len(means)):
print >> fout, means[i],
fout.close()
if __name__ == '__main__':
main(sys.argv[1:])
| {
"content_hash": "c713ae4146cee1688818235913bdb0cf",
"timestamp": "",
"source": "github",
"line_count": 574,
"max_line_length": 206,
"avg_line_length": 37.66202090592334,
"alnum_prop": 0.6293366638912018,
"repo_name": "MD2Korg/memphis-dataprocessingframework",
"id": "a56ef7bb21f16e2a68cbbbc8db845f1702fa2115",
"size": "21618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/model/libsvm/libsvm/svm_classify_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "44781"
},
{
"name": "Matlab",
"bytes": "1129961"
},
{
"name": "Python",
"bytes": "112970"
}
],
"symlink_target": ""
} |
import numpy as n
import numpy.random as nr
import random as r
from python_util.util import *
from python_util.data import *
from python_util.options import *
from python_util.gpumodel import *
import sys
import math as m
import layer as lay
from convdata import ImageDataProvider, CIFARDataProvider, DummyConvNetLogRegDataProvider
from os import linesep as NL
import copy as cp
import os
from python_util.convEMdata import EMDataProvider
class Driver(object):
def __init__(self, convnet):
self.convnet = convnet
def on_start_batch(self, batch_data, train):
pass
def on_finish_batch(self):
pass
class GradCheckDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.checkGradients(data)
class TrainingDriver(Driver):
def on_start_batch(self, batch_data, train):
data = batch_data[2]
self.convnet.libmodel.startBatch(data, self.convnet.get_progress(), not train)
class MultiviewTestDriver(TrainingDriver):
def on_start_batch(self, batch_data, train):
self.write_output = False
if train:
TrainingDriver.on_start_batch(self, batch_data, train)
else:
data = batch_data[2]
num_views = self.convnet.test_data_provider.num_views
if self.convnet.test_out != "" and self.convnet.logreg_name != "":
self.write_output = True
self.test_file_name = os.path.join(self.convnet.test_out, 'test_preds_%d' % batch_data[1])
self.probs = n.zeros((data[0].shape[1]/num_views, self.convnet.test_data_provider.get_num_classes()), dtype=n.single)
self.convnet.libmodel.startMultiviewTest(data, num_views, self.probs, self.convnet.logreg_name)
else:
self.convnet.libmodel.startMultiviewTest(data, num_views)
def on_finish_batch(self):
if self.write_output:
if not os.path.exists(self.convnet.test_out):
os.makedirs(self.convnet.test_out)
pickle(self.test_file_name, {'data': self.probs,
'note': 'generated from %s' % self.convnet.save_file})
class FeatureWriterDriver(Driver):
def __init__(self, convnet):
Driver.__init__(self, convnet)
self.last_batch = convnet.test_batch_range[-1]
def on_start_batch(self, batch_data, train):
if train:
raise ModelStateException("FeatureWriter must be used in conjunction with --test-only=1. It writes test data features.")
self.batchnum, self.data = batch_data[1], batch_data[2]
if not os.path.exists(self.convnet.feature_path):
os.makedirs(self.convnet.feature_path)
self.num_ftrs = self.convnet.layers[self.convnet.write_features]['outputs']
self.ftrs = n.zeros((self.data[0].shape[1], self.num_ftrs), dtype=n.single)
self.convnet.libmodel.startFeatureWriter(self.data, [self.ftrs], [self.convnet.write_features])
def on_finish_batch(self):
if not self.convnet.numpy_dump:
path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
pickle(path_out, {'data': self.ftrs, 'labels': self.data[1]})
print "Wrote feature file %s" % path_out
else:
#path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
#n.savez_compressed(path_out, data=self.ftrs, labels=self.data[1])
#n.savez(path_out, data=self.ftrs, labels=self.data[1])
# xxx - workaround, in python 2.7 both pickle and zip object on which savez rely have a 32 bit max size
path_out = os.path.join(self.convnet.feature_path, 'data_batch_data_%d' % self.batchnum)
n.save(path_out, self.ftrs)
print "Wrote feature file %s" % path_out
path_out = os.path.join(self.convnet.feature_path, 'data_batch_lbls_%d' % self.batchnum)
n.save(path_out, self.data[1])
print "Wrote feature file %s" % path_out
path_out = os.path.join(self.convnet.feature_path, 'data_batch_%d' % self.batchnum)
n.save(path_out, self.data[0])
print "Wrote feature file %s" % path_out
if self.batchnum == self.last_batch:
#pickle(os.path.join(self.convnet.feature_path, 'batches.meta'), {'source_model':self.convnet.load_file,
# 'num_vis':self.num_ftrs,
# 'batch_size': self.convnet.test_data_provider.batch_meta['batch_size']})
pickle(os.path.join(self.convnet.feature_path, 'batches.meta'), {'source_model':self.convnet.load_file,
'num_vis':self.num_ftrs,
'batch_meta': self.convnet.test_data_provider.batch_meta})
self.convnet.test_data_provider.on_finish_featurebatch(self.convnet.feature_path, self.batchnum,
self.batchnum == self.last_batch)
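    # Resulting on-disk layout when --use-numpy-dump is set (sketch, per test batch N):
    #   <feature_path>/data_batch_data_N.npy   features from the requested layer
    #   <feature_path>/data_batch_lbls_N.npy   labels for the batch
    #   <feature_path>/data_batch_N.npy        raw input data for the batch
    #   <feature_path>/batches.meta            pickled metadata, written after the last batch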
class ConvNet(IGPUModel):
def __init__(self, op, load_dic, dp_params={}):
filename_options = []
for v in ('color_noise', 'multiview_test', 'inner_size', 'scalar_mean', 'minibatch_size', 'em_feature_path', 'init_load_path'):
#for v in ('color_noise', 'multiview_test', 'inner_size', 'scalar_mean', 'minibatch_size', 'em_feature_path'):
dp_params[v] = op.get_value(v)
IGPUModel.__init__(self, "ConvNet", op, load_dic, filename_options, dp_params=dp_params)
def import_model(self):
lib_name = "cudaconvnet._ConvNet"
print "========================="
print "Importing %s C++ module" % lib_name
self.libmodel = __import__(lib_name,fromlist=['_ConvNet'])
def init_model_lib(self):
self.libmodel.initModel(self.layers,
self.device_ids,
self.minibatch_size,
self.conserve_mem)
def init_model_state(self):
ms = self.model_state
layers = ms['layers'] if self.loaded_from_checkpoint else {}
ms['layers'] = lay.LayerParser.parse_layers(os.path.join(self.layer_path, self.layer_def),
os.path.join(self.layer_path, self.layer_params), self, layers=layers)
self.do_decouple_conv()
self.do_unshare_weights()
self.op.set_value('conv_to_local', [], parse=False)
self.op.set_value('unshare_weights', [], parse=False)
self.set_driver()
def do_decouple_conv(self):
# Convert convolutional layers to local
if len(self.op.get_value('conv_to_local')) > 0:
for lname in self.op.get_value('conv_to_local'):
if self.model_state['layers'][lname]['type'] == 'conv':
lay.LocalLayerParser.conv_to_local(self.model_state['layers'], lname)
def do_unshare_weights(self):
# Decouple weight matrices
if len(self.op.get_value('unshare_weights')) > 0:
for name_str in self.op.get_value('unshare_weights'):
if name_str:
name = lay.WeightLayerParser.get_layer_name(name_str)
if name is not None:
name, idx = name[0], name[1]
if name not in self.model_state['layers']:
raise ModelStateException("Layer '%s' does not exist; unable to unshare" % name)
layer = self.model_state['layers'][name]
lay.WeightLayerParser.unshare_weights(layer, self.model_state['layers'], matrix_idx=idx)
else:
raise ModelStateException("Invalid layer name '%s'; unable to unshare." % name_str)
def set_driver(self):
if self.op.get_value('check_grads'):
self.driver = GradCheckDriver(self)
elif self.op.get_value('multiview_test'):
self.driver = MultiviewTestDriver(self)
elif self.op.get_value('write_features'):
self.driver = FeatureWriterDriver(self)
else:
self.driver = TrainingDriver(self)
def fill_excused_options(self):
if self.op.get_value('check_grads'):
self.op.set_value('save_path', '')
self.op.set_value('train_batch_range', '0')
self.op.set_value('test_batch_range', '0')
self.op.set_value('data_path', '')
# Make sure the data provider returned data in proper format
def parse_batch_data(self, batch_data, train=True):
if max(d.dtype != n.single for d in batch_data[2]):
raise DataProviderException("All matrices returned by data provider must consist of single-precision floats.")
return batch_data
def start_batch(self, batch_data, train=True):
self.driver.on_start_batch(batch_data, train)
def finish_batch(self):
ret = IGPUModel.finish_batch(self)
self.driver.on_finish_batch()
return ret
def print_iteration(self):
print "%d.%d (%.2f%%)..." % (self.epoch, self.batchnum, 100 * self.get_progress()),
def print_train_time(self, compute_time_py):
print "(%.3f sec)" % (compute_time_py)
def print_costs(self, cost_outputs):
costs, num_cases = cost_outputs[0], cost_outputs[1]
children = set()
for errname in costs:
if sum(errname in self.layers[z]['children'] for z in costs) == 0:
# print self.layers[errname]['children']
for child in set(self.layers[errname]['children']) & set(costs.keys()):
costs[errname] = [v + u for v, u in zip(costs[errname], costs[child])]
children.add(child)
filtered_costs = eval(self.layers[errname]['outputFilter'])(costs[errname], num_cases)
print "%s: " % errname,
if 'outputFilterFormatter' not in self.layers[errname]:
print ", ".join("%.6f" % v for v in filtered_costs),
else:
print eval(self.layers[errname]['outputFilterFormatter'])(self,filtered_costs),
if m.isnan(filtered_costs[0]) or m.isinf(filtered_costs[0]):
print "<- error nan or inf!"
sys.exit(1)
for c in children:
del costs[c]
def print_train_results(self):
self.print_costs(self.train_outputs[-1])
def print_test_status(self):
pass
def print_test_results(self):
print NL + "======================Test output======================"
self.print_costs(self.test_outputs[-1])
if not self.test_only:
print NL + "----------------------Averages-------------------------"
self.print_costs(self.aggregate_test_outputs(self.test_outputs[-len(self.test_batch_range):]))
print NL + "-------------------------------------------------------",
for name,val in sorted(self.layers.items(), key=lambda x: x[1]['id']): # This is kind of hacky but will do for now.
l = self.layers[name]
if 'weights' in l:
wscales = [(l['name'], i, n.mean(n.abs(w)), n.mean(n.abs(wi))) for i,(w,wi) in enumerate(zip(l['weights'],l['weightsInc']))]
print ""
print NL.join("Layer '%s' weights[%d]: %e [%e] [%e]" % (s[0], s[1], s[2], s[3], s[3]/s[2] if s[2] > 0 else 0) for s in wscales),
print "%sLayer '%s' biases: %e [%e]" % (NL, l['name'], n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc']))),
print ""
def conditional_save(self):
self.save_state()
def aggregate_test_outputs(self, test_outputs):
test_outputs = cp.deepcopy(test_outputs)
num_cases = sum(t[1] for t in test_outputs)
for i in xrange(1 ,len(test_outputs)):
for k,v in test_outputs[i][0].items():
for j in xrange(len(v)):
test_outputs[0][0][k][j] += test_outputs[i][0][k][j]
return (test_outputs[0][0], num_cases)
@classmethod
def get_options_parser(cls):
op = IGPUModel.get_options_parser()
op.add_option("mini", "minibatch_size", IntegerOptionParser, "Minibatch size", default=128)
op.add_option("layer-def", "layer_def", StringOptionParser, "Layer definition file", set_once=False)
op.add_option("layer-params", "layer_params", StringOptionParser, "Layer parameter file")
op.add_option("layer-path", "layer_path", StringOptionParser, "Layer file path prefix", default="")
op.add_option("check-grads", "check_grads", BooleanOptionParser, "Check gradients and quit?", default=0, excuses=['data_path','save_path', 'save_file_override', 'train_batch_range','test_batch_range'])
op.add_option("multiview-test", "multiview_test", BooleanOptionParser, "Cropped DP: test on multiple patches?", default=0)
op.add_option("inner-size", "inner_size", IntegerOptionParser, "Cropped DP: crop size (0 = don't crop)", default=0, set_once=True)
op.add_option("conv-to-local", "conv_to_local", ListOptionParser(StringOptionParser), "Convert given conv layers to unshared local", default=[])
op.add_option("unshare-weights", "unshare_weights", ListOptionParser(StringOptionParser), "Unshare weight matrices in given layers", default=[])
op.add_option("conserve-mem", "conserve_mem", BooleanOptionParser, "Conserve GPU memory (slower)?", default=0)
op.add_option("color-noise", "color_noise", FloatOptionParser, "Add PCA noise to color channels with given scale", default=0.0)
op.add_option("test-out", "test_out", StringOptionParser, "Output test case predictions to given path", default="", requires=['logreg_name', 'multiview_test'])
op.add_option("logreg-name", "logreg_name", StringOptionParser, "Logreg cost layer name (for --test-out)", default="")
op.add_option("scalar-mean", "scalar_mean", FloatOptionParser, "Subtract this scalar from image (-1 = don't)", default=-1)
op.add_option("write-features", "write_features", StringOptionParser, "Write test data features from given layer", default="", requires=['feature-path'])
op.add_option("feature-path", "feature_path", StringOptionParser, "Write test data features to this path (to be used with --write-features)", default="")
# options added just for EM data parser, some override value from EM .ini file
op.add_option("em-feature-path", "em_feature_path", StringOptionParser, "Write EM recon cubes to this path (to be used with --write-features)", default="")
op.add_option("init-load-path", "init_load_path", StringOptionParser, "Path where saved weights or other saved matrix values are stored", default="")
op.add_option("use-numpy-dump", "numpy_dump", BooleanOptionParser, "Save features in numpy format (to be used with --write-features)", default=0)
op.add_option("chunk-skip-list", "chunk_skip_list", ListOptionParser(IntegerOptionParser), "Skip these random EM chunks, usually for test, override .ini", default=[])
op.add_option("dim-ordering", "dim_ordering", StringOptionParser, "Which reslice ordering for EM provider, override .ini", default="")
op.delete_option('max_test_err')
op.options["testing_freq"].default = 57
op.options["num_epochs"].default = 50000
op.options['dp_type'].default = None
DataProvider.register_data_provider('dummy-lr-n', 'Dummy ConvNet logistic regression', DummyConvNetLogRegDataProvider)
DataProvider.register_data_provider('image', 'JPEG-encoded image data provider', ImageDataProvider)
DataProvider.register_data_provider('cifar', 'CIFAR-10 data provider', CIFARDataProvider)
DataProvider.register_data_provider('emdata', 'Electron Microscopy data provider', EMDataProvider)
return op
if __name__ == "__main__":
# nr.seed(6)
op = ConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ConvNet(op, load_dic)
model.start()
| {
"content_hash": "2a4992f5985d055ca420f1ece2d2bbad",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 209,
"avg_line_length": 53.876221498371336,
"alnum_prop": 0.5918379685610641,
"repo_name": "elhuhdron/emdrp",
"id": "5bae29b6da2e9d523c3093249183c79697e4990d",
"size": "17139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "legacy/cuda-convnet2/convnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "37754"
},
{
"name": "C++",
"bytes": "154981"
},
{
"name": "Cuda",
"bytes": "1363813"
},
{
"name": "MATLAB",
"bytes": "325043"
},
{
"name": "Makefile",
"bytes": "21180"
},
{
"name": "Python",
"bytes": "1382623"
},
{
"name": "Shell",
"bytes": "198347"
}
],
"symlink_target": ""
} |
from .services.quota_controller import QuotaControllerAsyncClient, QuotaControllerClient
from .services.service_controller import (
ServiceControllerAsyncClient,
ServiceControllerClient,
)
from .types.check_error import CheckError
from .types.distribution import Distribution
from .types.http_request import HttpRequest
from .types.log_entry import LogEntry, LogEntryOperation, LogEntrySourceLocation
from .types.metric_value import MetricValue, MetricValueSet
from .types.operation import Operation
from .types.quota_controller import (
AllocateQuotaRequest,
AllocateQuotaResponse,
QuotaError,
QuotaOperation,
)
from .types.service_controller import (
CheckRequest,
CheckResponse,
ReportRequest,
ReportResponse,
)
__all__ = (
"QuotaControllerAsyncClient",
"ServiceControllerAsyncClient",
"AllocateQuotaRequest",
"AllocateQuotaResponse",
"CheckError",
"CheckRequest",
"CheckResponse",
"Distribution",
"HttpRequest",
"LogEntry",
"LogEntryOperation",
"LogEntrySourceLocation",
"MetricValue",
"MetricValueSet",
"Operation",
"QuotaControllerClient",
"QuotaError",
"QuotaOperation",
"ReportRequest",
"ReportResponse",
"ServiceControllerClient",
)
| {
"content_hash": "1ec0febfc4f90532c1761b5537fe4896",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 88,
"avg_line_length": 26.95744680851064,
"alnum_prop": 0.7434885556432518,
"repo_name": "googleapis/python-service-control",
"id": "a214a2eb43a778f199aac1744ae62d76c1e12609",
"size": "1868",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/servicecontrol_v1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "517600"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
} |
import json
import os
import sys
import getopt
from jupyter_client.kernelspec import KernelSpecManager
from IPython.utils.tempdir import TemporaryDirectory
kernel_json = {"argv":[sys.executable,"-m","kdbq_kernel", "-f", "{connection_file}"],
"display_name":"Kdb/Q",
"language":"q",
"codemirror_mode":"Q",
"env":{"PS1": "$"}
}
def install_my_kernel_spec(user=True, prefix=None):
with TemporaryDirectory() as td:
os.chmod(td, 0o755) # Starts off as 700, not user readable
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(kernel_json, f, sort_keys=True)
# TODO: Copy resources once they're specified
print('Installing IPython kernel spec')
KernelSpecManager().install_kernel_spec(td, 'kdbq', user=user, replace=True, prefix=prefix)
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
def main(argv=[]):
prefix = None
user = not _is_root()
opts, _ = getopt.getopt(argv[1:], '', ['user', 'prefix='])
for k, v in opts:
if k == '--user':
user = True
elif k == '--prefix':
prefix = v
user = False
install_my_kernel_spec(user=user, prefix=prefix)
if __name__ == '__main__':
main(argv=sys.argv)
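# Typical invocations (sketch; assumes the module is importable as kdbq_kernel.install):
#   python -m kdbq_kernel.install                        # per-user install unless run as root
#   python -m kdbq_kernel.install --user                 # force a per-user kernelspec
#   python -m kdbq_kernel.install --prefix=/opt/jupyter  # install under a given prefix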
| {
"content_hash": "3a7a4f37eb2ca84eb590f116d56c38dd",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 99,
"avg_line_length": 28.574468085106382,
"alnum_prop": 0.6120625465376024,
"repo_name": "newtux/KdbQ_kernel",
"id": "c80ed2f79561e8396c3d1c02c7f267409012e049",
"size": "1343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kdbq_kernel/install.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12112"
}
],
"symlink_target": ""
} |
import datetime
# Django
import django
from django import forms
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
SECS_PER_DAY=3600*24
class TimedeltaField(models.Field):
u'''
Store Python's datetime.timedelta in an integer column.
Most database systems only support 32 bit integers by default.
'''
__metaclass__ = models.SubfieldBase
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
super(TimedeltaField, self).__init__(*args, **kwargs)
def to_python(self, value):
if (value is None) or isinstance(value, datetime.timedelta):
return value
try:
# else try to convert to int (e.g. from string)
value = int(value)
except (TypeError, ValueError):
raise django.core.exceptions.ValidationError(
_("This value must be an integer or a datetime.timedelta."))
return datetime.timedelta(seconds=value)
def get_internal_type(self):
return 'IntegerField'
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
raise NotImplementedError() # SQL WHERE
def get_db_prep_save(self, value, connection=None, prepared=False):
if (value is None) or isinstance(value, int):
return value
return SECS_PER_DAY*value.days+value.seconds
def formfield(self, *args, **kwargs):
defaults={'form_class': TimedeltaFormField}
defaults.update(kwargs)
return super(TimedeltaField, self).formfield(*args, **defaults)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
class TimedeltaFormField(forms.Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
}
def __init__(self, *args, **kwargs):
defaults={'widget': TimedeltaWidget}
defaults.update(kwargs)
super(TimedeltaFormField, self).__init__(*args, **defaults)
def clean(self, value):
# value comes from Timedelta.Widget.value_from_datadict(): tuple of strings
super(TimedeltaFormField, self).clean(value)
assert len(value)==len(self.widget.inputs), (value, self.widget.inputs)
i=0
for value, multiply in zip(value, self.widget.multiply):
try:
i+=int(value)*multiply
except (ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid'])
return i
class TimedeltaWidget(forms.Widget):
INPUTS=['days', 'hours', 'minutes', 'seconds']
MULTIPLY=[60*60*24, 60*60, 60, 1]
def __init__(self, attrs=None):
self.widgets=[]
if not attrs:
attrs={}
inputs=attrs.get('inputs', self.INPUTS)
multiply=[]
for input in inputs:
            assert input in self.INPUTS, (input, self.INPUTS)
self.widgets.append(forms.TextInput(attrs=attrs))
multiply.append(self.MULTIPLY[self.INPUTS.index(input)])
self.inputs=inputs
self.multiply=multiply
super(TimedeltaWidget, self).__init__(attrs)
def render(self, name, value, attrs):
if value is None:
values=[0 for i in self.inputs]
elif isinstance(value, datetime.timedelta):
values=split_seconds(value.days*SECS_PER_DAY+value.seconds, self.inputs, self.multiply)
elif isinstance(value, int):
# initial data from model
values=split_seconds(value, self.inputs, self.multiply)
else:
assert isinstance(value, tuple), (value, type(value))
assert len(value)==len(self.inputs), (value, self.inputs)
values=value
id=attrs.pop('id')
assert not attrs, attrs
rendered=[]
for input, widget, val in zip(self.inputs, self.widgets, values):
rendered.append(u'%s %s' % (_(input), widget.render('%s_%s' % (name, input), val)))
return mark_safe('<div id="%s">%s</div>' % (id, ' '.join(rendered)))
def value_from_datadict(self, data, files, name):
# Don't throw ValidationError here, just return a tuple of strings.
ret=[]
for input, multi in zip(self.inputs, self.multiply):
ret.append(data.get('%s_%s' % (name, input), 0))
return tuple(ret)
def _has_changed(self, initial_value, data_value):
# data_value comes from value_from_datadict(): A tuple of strings.
if initial_value is None:
return bool(set(data_value)!=set([u'0']))
assert isinstance(initial_value, datetime.timedelta), initial_value
initial=tuple([unicode(i) for i in split_seconds(initial_value.days*SECS_PER_DAY+initial_value.seconds, self.inputs, self.multiply)])
assert len(initial)==len(data_value), (initial, data_value)
return bool(initial!=data_value)
def main():
assert split_seconds(1000000)==[11, 13, 46, 40]
field=TimedeltaField()
td=datetime.timedelta(days=10, seconds=11)
s=field.get_db_prep_save(td)
assert isinstance(s, int), (s, type(s))
td_again=field.to_python(s)
assert td==td_again, (td, td_again)
td=datetime.timedelta(seconds=11)
s=field.get_db_prep_save(td)
td_again=field.to_python(s)
assert td==td_again, (td, td_again)
field=TimedeltaFormField()
assert field.widget._has_changed(datetime.timedelta(seconds=0), (u'0', u'0', u'0', u'0',)) is False
assert field.widget._has_changed(None, (u'0', u'0', u'0', u'0',)) is False
assert field.widget._has_changed(None, (u'0', u'0')) is False
assert field.widget._has_changed(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4), (u'1', u'2', u'3', u'4',)) is False
for secs, soll, kwargs in [
(100, [0, 0, 1, 40], dict()),
(100, ['0days', '0hours', '1minutes', '40seconds'], dict(with_unit=True)),
(100, ['1minutes', '40seconds'], dict(with_unit=True, remove_leading_zeros=True)),
(100000, ['1days', '3hours'], dict(inputs=['days', 'hours'], with_unit=True, remove_leading_zeros=True)),
]:
ist=split_seconds(secs, **kwargs)
if ist!=soll:
raise Exception('geg=%s soll=%s ist=%s kwargs=%s' % (secs, soll, ist, kwargs))
print "unittest OK"
def split_seconds(secs, inputs=TimedeltaWidget.INPUTS, multiply=TimedeltaWidget.MULTIPLY,
with_unit=False, remove_leading_zeros=False):
ret=[]
assert len(inputs)<=len(multiply), (inputs, multiply)
for input, multi in zip(inputs, multiply):
count, secs = divmod(secs, multi)
if remove_leading_zeros and not ret and not count:
continue
if with_unit:
ret.append('%s%s' % (count, input))
else:
ret.append(count)
return ret
if __name__=='__main__':
main()
| {
"content_hash": "9d71d97d7d7b31e57bd36524e59697bf",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 141,
"avg_line_length": 38.33701657458563,
"alnum_prop": 0.6189652687707162,
"repo_name": "wpjesus/codematch",
"id": "234b670d8bdf53af60db56917751fa0262efcd44",
"size": "7189",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ietf/meeting/timedeltafield.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "139492"
},
{
"name": "CSS",
"bytes": "733662"
},
{
"name": "Groff",
"bytes": "2349"
},
{
"name": "HTML",
"bytes": "2149789"
},
{
"name": "JavaScript",
"bytes": "1003699"
},
{
"name": "Makefile",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "17323"
},
{
"name": "PostScript",
"bytes": "35"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "4536908"
},
{
"name": "Shell",
"bytes": "74113"
},
{
"name": "TeX",
"bytes": "2556"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import, unicode_literals
from ufo2ft.fontInfoData import getAttrWithFallback
from ufo2ft.filters import BaseFilter
from cu2qu.ufo import DEFAULT_MAX_ERR, CURVE_TYPE_LIB_KEY
from cu2qu.pens import Cu2QuPointPen
import logging
logger = logging.getLogger(__name__)
class CubicToQuadraticFilter(BaseFilter):
_kwargs = {
"conversionError": None,
"reverseDirection": True,
"rememberCurveType": False,
}
def set_context(self, font, glyphSet):
ctx = super(CubicToQuadraticFilter, self).set_context(font, glyphSet)
relativeError = self.options.conversionError or DEFAULT_MAX_ERR
ctx.absoluteError = relativeError * getAttrWithFallback(font.info, "unitsPerEm")
ctx.stats = {}
return ctx
def __call__(self, font, glyphSet=None):
if self.options.rememberCurveType:
# check first in the global font lib, then in layer lib
for lib in (font.lib, getattr(glyphSet, "lib", {})):
curve_type = lib.get(CURVE_TYPE_LIB_KEY, "cubic")
if curve_type == "quadratic":
logger.info("Curves already converted to quadratic")
return set()
elif curve_type == "cubic":
pass # keep converting
else:
raise NotImplementedError(curve_type)
modified = super(CubicToQuadraticFilter, self).__call__(font, glyphSet)
if modified:
stats = self.context.stats
logger.info(
"New spline lengths: %s"
% (", ".join("%s: %d" % (l, stats[l]) for l in sorted(stats.keys())))
)
if self.options.rememberCurveType:
# 'lib' here is the layer's lib, as defined in for loop variable
curve_type = lib.get(CURVE_TYPE_LIB_KEY, "cubic")
if curve_type != "quadratic":
lib[CURVE_TYPE_LIB_KEY] = "quadratic"
return modified
def filter(self, glyph):
if not len(glyph):
return False
pen = Cu2QuPointPen(
glyph.getPointPen(),
self.context.absoluteError,
reverse_direction=self.options.reverseDirection,
stats=self.context.stats,
)
contours = list(glyph)
glyph.clearContours()
for contour in contours:
contour.drawPoints(pen)
return True
| {
"content_hash": "b601654a4b74618674a8bfdb79b8221a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 88,
"avg_line_length": 33.13333333333333,
"alnum_prop": 0.5907444668008048,
"repo_name": "jamesgk/ufo2ft",
"id": "6347f4db664b52d3b11a74c861d3e5e2a8a87fa9",
"size": "2485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/ufo2ft/filters/cubicToQuadratic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77326"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.db.backends.mysql.base import DatabaseWrapper
from django.db.migrations.recorder import MigrationRecorder
from core import Core
from core.libs.install import get_installation_status, install_database
class Initialize(Core, AppConfig):
name = 'core'
def ready(self):
DatabaseWrapper._data_types['AutoField'] = 'integer UNSIGNED AUTO_INCREMENT'
MigrationRecorder.Migration._meta.db_table = self.settings.DB_TABLE_PREFIX + MigrationRecorder.Migration._meta.db_table
Core.installation_status = get_installation_status()
if self.installation_status == -2:
install_database()
Core.installation_status = -1
| {
"content_hash": "2d2a50dcd65e112b578df622e642df5a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 127,
"avg_line_length": 35.8,
"alnum_prop": 0.7332402234636871,
"repo_name": "jat001/JADB",
"id": "4aef833abac84b0cedad486a55b93304e4a3c9cc",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1145"
},
{
"name": "Python",
"bytes": "30662"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import ipaddress
from django.conf import settings
class UseXForwardedFor(object):
def process_request(self, request):
"""
Update request.META['REMOTE_ADDR'] to use the address from the X-Forwarded-For header
"""
if getattr(settings, 'USE_X_FORWARDED_FOR', False) is True:
try:
ip_addresses = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
pass
else:
for ip_address in ip_addresses.split(', '):
try:
ipaddress.ip_address(ip_address)
except ValueError:
pass
else:
request.META['REMOTE_ADDR'] = ip_address
break
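# Hedged configuration sketch (settings names outside this file are assumptions):
#   # settings.py
#   USE_X_FORWARDED_FOR = True
#   MIDDLEWARE_CLASSES = [
#       'use_x_forwarded_for.middleware.UseXForwardedFor',
#       # ...
#   ]
# With this enabled, REMOTE_ADDR is replaced by the first syntactically valid
# address found in the X-Forwarded-For header.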
| {
"content_hash": "050bf41a2a486b0ba64a18e7c6cb2dac",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 93,
"avg_line_length": 32.15384615384615,
"alnum_prop": 0.507177033492823,
"repo_name": "kellengreen/django-use_x_forwarded_for",
"id": "d4b648ae47b6279429f29c679bdc00547339ebde",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "use_x_forwarded_for/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2658"
}
],
"symlink_target": ""
} |
"""Installation script for setuptools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
EXTRA_PACKAGES = {
'tf': ['tensorflow>=1.14'],
'tf_gpu': ['tensorflow-gpu>=1.14'],
}
setup(
name='multi_object_datasets',
version='1.0.0',
author='DeepMind',
license='Apache License, Version 2.0',
description=('Multi-object image datasets with'
'ground-truth segmentation masks and generative factors.'),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
keywords=['datasets', 'machine learning', 'representation learning'],
url='https://github.com/deepmind/multi_object_datasets',
packages=['multi_object_datasets'],
package_dir={'multi_object_datasets': '.'},
extras_require=EXTRA_PACKAGES,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
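# Installation sketch (illustration): the extras defined above map to
#   pip install .           # core package only
#   pip install ".[tf]"     # with CPU TensorFlow >= 1.14
#   pip install ".[tf_gpu]" # with GPU TensorFlow >= 1.14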
| {
"content_hash": "0be06d43b75e08cb8c616f754a030de0",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 35,
"alnum_prop": 0.6408163265306123,
"repo_name": "deepmind/multi_object_datasets",
"id": "b14f7b207dc6023b6456b8b41f665137e2277bcf",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24104"
}
],
"symlink_target": ""
} |
from sfc_models.objects import *
from sfc_models.examples.Quick2DPlot import Quick2DPlot
register_standard_logs('output', __file__)
mod = Model()
country = Country(mod, 'CO')
Household(country, 'HH')
ConsolidatedGovernment(country, 'GOV')
FixedMarginBusiness(country, 'BUS', profit_margin=.025)
Market(country, 'GOOD')
Market(country, 'LAB')
TaxFlow(country, 'TAX', taxrate=.2)
# At time period 25, cut spending to 17 (from 20)
mod.AddExogenous('GOV', 'DEM_GOOD', [20.,]* 25 + [17.,]*20)
mod.AddGlobalEquation('DEBT_GDP', 'DEBT-TO-GDP RATIO', '-100.*GOV__F/BUS__SUP_GOOD')
mod.AddGlobalEquation('DEFICIT', 'DEFICIT', '-1.*GOV__INC')
mod.EquationSolver.MaxTime = 40
mod.main()
k = mod.GetTimeSeries('k')
Rat = mod.GetTimeSeries('DEBT_GDP')
Def = mod.GetTimeSeries('GOV__INC')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')
p = Quick2DPlot([k, k], [spend, Def], title='Spending and Deficit', filename='intro_X_XX_multiplier_deficit.png',
run_now=False)
p.Legend = ['G', 'Deficit']
p.LegendPos = 'center left'
p.DoPlot()
Quick2DPlot(k, Rat, title='Debt-to-GDP Ratio', filename='intro_X_XX_multiplier_debt_gdp.png')
| {
"content_hash": "a868713e1bfa301c2462ff6af1739dad",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 113,
"avg_line_length": 36.38709677419355,
"alnum_prop": 0.6985815602836879,
"repo_name": "brianr747/SFC_models",
"id": "d4fd04698f7477aacd1d458ba68e94970c4579ef",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "433125"
}
],
"symlink_target": ""
} |
import unittest
import logging
import tkp.db.model
from tkp.testutil.alchemy import gen_band, gen_dataset, gen_skyregion,\
gen_lightcurve
import tkp.db
import tkp.db.alchemy
from tkp.steps.varmetric import execute_store_varmetric
logging.basicConfig(level=logging.INFO)
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
class TestApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.db = tkp.db.Database()
cls.db.connect()
def setUp(self):
self.session = self.db.Session()
band = gen_band(central=150**6)
self.dataset = gen_dataset('test varmetric step')
skyregion = gen_skyregion(self.dataset)
lightcurve = gen_lightcurve(band, self.dataset, skyregion)
self.session.add_all(lightcurve)
self.session.flush()
self.session.commit()
def test_execute_store_varmetric(self):
session = self.db.Session()
execute_store_varmetric(session=session, dataset_id=self.dataset.id)
| {
"content_hash": "8807082e6545b6147bb3b3dc8e0472fd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.6944990176817288,
"repo_name": "mkuiack/tkp",
"id": "796476466929755a1a44ccfe2f22da6431e45448",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_steps/test_varmetric.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLpgSQL",
"bytes": "18823"
},
{
"name": "Python",
"bytes": "903657"
},
{
"name": "Shell",
"bytes": "588"
}
],
"symlink_target": ""
} |
"""
Local file system storage.
"""
import errno
from hashlib import md5
import io
import os
from ._compat import urlopen
from .thumb import Thumb
def make_dirs(path):
try:
os.makedirs(os.path.dirname(path))
except (OSError) as e:
if e.errno != errno.EEXIST:
raise
return path
class Storage(object):
def __init__(self, base_path, base_url='/', thumbsdir='t', out_path=None):
self.base_path = base_path.rstrip('/')
self.base_url = base_url.rstrip('/') or '/'
self.thumbsdir = thumbsdir
self.out_path = (out_path or self.base_path).rstrip('/')
super(self.__class__, self).__init__()
def get_key(self, path, geometry, filters, options):
"""Generates the thumbnail's key from it's arguments.
If the arguments doesn't change the key will not change
"""
seed = u' '.join([
str(path),
str(geometry),
str(filters),
str(options),
]).encode('utf8')
return md5(seed).hexdigest()
def get_source(self, path_or_url):
"""Returns the source image file descriptor.
path_or_url:
Path to the source image as an absolute path, a path relative
to `self.base_path` or a URL beginning with `http[s]`
"""
if path_or_url.startswith(('http://', 'https://')):
try:
return urlopen(path_or_url)
except IOError:
return None
fullpath = path_or_url
if not os.path.isabs(path_or_url):
fullpath = os.path.join(self.base_path, path_or_url)
try:
return io.open(fullpath, 'rb')
except IOError:
return None
def get_thumb(self, path, key, format):
"""Get the stored thumbnail if exists.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
"""
thumbpath = self.get_thumbpath(path, key, format)
fullpath = os.path.join(self.out_path, thumbpath)
if os.path.isfile(fullpath):
url = self.get_url(thumbpath)
return Thumb(url, key)
return Thumb()
def get_thumbpath(self, path, key, format):
"""Return the relative path of the thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail file extension
"""
relpath = os.path.dirname(path)
thumbsdir = self.get_thumbsdir(path)
name, _ = os.path.splitext(os.path.basename(path))
name = '{}.{}.{}'.format(name, key, format.lower())
return os.path.join(relpath, thumbsdir, name)
def get_thumbsdir(self, path):
"""
path:
path of the source image
"""
# Thumbsdir could be a callable
# In that case, the path is built on the fly, based on the source path
thumbsdir = self.thumbsdir
if callable(self.thumbsdir):
thumbsdir = self.thumbsdir(path)
return thumbsdir
def save(self, path, key, format, data):
"""Save a newly generated thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail's file extension
data:
thumbnail's binary data
"""
thumbpath = self.get_thumbpath(path, key, format)
fullpath = os.path.join(self.out_path, thumbpath)
self.save_thumb(fullpath, data)
url = self.get_url(thumbpath)
thumb = Thumb(url, key, fullpath)
return thumb
def save_thumb(self, fullpath, data):
make_dirs(fullpath)
with io.open(fullpath, 'wb') as f:
f.write(data)
def get_url(self, thumbpath):
return os.path.join(self.base_url, thumbpath.strip('/'))
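# A minimal usage sketch, not part of the original module; it assumes a local
# ``media`` directory and that it is run as ``python -m moar.storage`` so the
# relative imports above still resolve. Paths and geometry are placeholders.
if __name__ == '__main__':
    storage = Storage(base_path='media', base_url='/media/', thumbsdir='t')
    # The key depends only on the arguments, so the same request always maps
    # to the same thumbnail file.
    key = storage.get_key('photos/cat.jpg', '200x200', [], {})
    thumb = storage.get_thumb('photos/cat.jpg', key, 'jpeg')
    print(thumb.url or 'thumbnail not generated yet')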
| {
"content_hash": "87bb4675737beca741083f7b3653bc3e",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 78,
"avg_line_length": 29.095588235294116,
"alnum_prop": 0.5569876168814759,
"repo_name": "lucuma/moar",
"id": "ee34342bcd5ff88b5029e7fa8ab2862b937645ba",
"size": "3972",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moar/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "628"
},
{
"name": "Python",
"bytes": "53077"
}
],
"symlink_target": ""
} |
"""Cloudpipe interface."""
from novaclient import base
class Cloudpipe(base.Resource):
"""A cloudpipe instance is a VPN attached to a project's VLAN."""
def __repr__(self):
return "<Cloudpipe: %s>" % self.project_id
def delete(self):
"""
        Delete this cloudpipe instance.
:returns: An instance of novaclient.base.TupleWithMeta
"""
return self.manager.delete(self)
class CloudpipeManager(base.ManagerWithFind):
resource_class = Cloudpipe
def create(self, project):
"""Launch a cloudpipe instance.
:param project: UUID of the project (tenant) for the cloudpipe
"""
body = {'cloudpipe': {'project_id': project}}
return self._create('/os-cloudpipe', body, 'instance_id',
return_raw=True)
def list(self):
"""Get a list of cloudpipe instances."""
return self._list('/os-cloudpipe', 'cloudpipes')
def update(self, address, port):
"""Configure cloudpipe parameters for the project.
Update VPN address and port for all networks associated
with the project defined by authentication
:param address: IP address
:param port: Port number
:returns: An instance of novaclient.base.TupleWithMeta
"""
body = {'configure_project': {'vpn_ip': address,
'vpn_port': port}}
return self._update("/os-cloudpipe/configure-project", body)
| {
"content_hash": "f7ecfbbdfce830cfbae73dff771740a3",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 29.96,
"alnum_prop": 0.6054739652870494,
"repo_name": "xuweiliang/Codelibrary",
"id": "cf8040b1764cc5a62f2059c1925673f9e8cf7d0e",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "novaclient/v2/cloudpipe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Company(models.Model):
name = models.CharField(max_length=30)
public = models.BooleanField(default=False)
class PetStore(models.Model):
name = models.CharField(max_length=30)
class Person(models.Model):
name = models.CharField(max_length=30)
hobbies = models.CharField(max_length=30)
employer = models.ForeignKey(Company, on_delete=models.CASCADE)
class Pet(models.Model):
name = models.CharField(max_length=30)
toys = models.CharField(max_length=30)
species = models.CharField(max_length=30)
owner = models.ForeignKey(Person, on_delete=models.CASCADE)
sold_from = models.ForeignKey(PetStore, null=True, on_delete=models.CASCADE)
diet = models.CharField(max_length=200)
class TaggedItem(models.Model):
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id') | {
"content_hash": "b38d32bbfd30886e20278e6bb78f7c3d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 80,
"avg_line_length": 32.72222222222222,
"alnum_prop": 0.7495755517826825,
"repo_name": "rsinger86/drf-flex-fields",
"id": "df01d9d339a30978632622ca62ad32437c07fd60",
"size": "1178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59871"
}
],
"symlink_target": ""
} |
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from django.http import Http404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from snisi_core.permissions import provider_allowed_or_denied
from snisi_core.models.Projects import Cluster
from snisi_core.models.Periods import MonthPeriod, Period
from snisi_core.models.Entities import Entity
from snisi_cataract.models import CATMissionR
from snisi_web.utils import (entity_browser_context, get_base_url_for_period,
get_base_url_for_periods,
ensure_entity_in_cluster,
ensure_entity_at_least)
logger = logging.getLogger(__name__)
@login_required
def cataract_mission_browser(request,
entity_slug=None,
period_str=None,
**kwargs):
context = {}
root = request.user.location
cluster = Cluster.get_or_none('cataract')
entity = Entity.get_or_none(entity_slug)
if entity is None:
entity = root
if entity is None:
raise Http404("Aucune entité pour le code {}".format(entity_slug))
# make sure requested entity is in cluster
ensure_entity_in_cluster(cluster, entity)
# check permissions on this entity and raise 403
provider_allowed_or_denied(request.user, 'access_cataract', entity)
# mission browser is reserved to district-level and above
ensure_entity_at_least(entity, 'health_district')
def period_from_strid(period_str, reportcls=None):
period = None
if period_str:
try:
period = Period.from_url_str(period_str).casted()
except:
pass
return period
period = period_from_strid(period_str)
if period is None:
period = MonthPeriod.current()
try:
first_period = MonthPeriod.find_create_by_date(
CATMissionR.objects.all()
.order_by('period__start_on')[0].period.middle())
except IndexError:
first_period = MonthPeriod.current()
all_periods = MonthPeriod.all_from(first_period)
context.update({
'all_periods': [(p.strid(), p) for p in reversed(all_periods)],
'period': period,
'base_url': get_base_url_for_period(
view_name='cataract_missions', entity=entity,
period_str=period_str or period.strid())
})
context.update(entity_browser_context(
root=root, selected_entity=entity,
full_lineage=['country', 'health_region', 'health_district'],
cluster=cluster))
# retrieve list of missions for that period
missions = CATMissionR.objects.filter(
period=period,
entity__slug__in=[e.slug for e in entity.get_health_districts()])
context.update({'missions': missions})
return render(request,
kwargs.get('template_name', 'cataract/missions_list.html'),
context)
@login_required
def cataract_mission_viewer(request, report_receipt, **kwargs):
context = {}
mission = CATMissionR.get_or_none(report_receipt)
if mission is None:
return Http404("Nº de reçu incorrect : {}".format(report_receipt))
context.update({'mission': mission})
return render(request,
kwargs.get('template_name', 'cataract/mission_detail.html'),
context)
@login_required
def cataract_dashboard(request,
entity_slug=None,
perioda_str=None,
periodb_str=None,
**kwargs):
context = {}
root = request.user.location
cluster = Cluster.get_or_none('cataract')
entity = Entity.get_or_none(entity_slug)
if entity is None:
entity = root
if entity is None:
raise Http404("Aucune entité pour le code {}".format(entity_slug))
# make sure requested entity is in cluster
ensure_entity_in_cluster(cluster, entity)
# check permissions on this entity and raise 403
provider_allowed_or_denied(request.user, 'access_cataract', entity)
# mission browser is reserved to district-level and above
ensure_entity_at_least(entity, 'health_district')
def period_from_strid(period_str, reportcls=None):
period = None
if period_str:
try:
period = Period.from_url_str(period_str).casted()
except:
pass
return period
perioda = period_from_strid(perioda_str)
periodb = period_from_strid(periodb_str)
if periodb is None:
periodb = MonthPeriod.current()
if perioda is None:
perioda = periodb
if perioda is None or periodb is None:
raise Http404("Période incorrecte.")
if perioda > periodb:
t = perioda
perioda = periodb
periodb = t
del(t)
try:
first_period = MonthPeriod.find_create_by_date(
CATMissionR.objects.all().order_by(
'period__start_on')[0].period.middle())
except IndexError:
first_period = MonthPeriod.current()
all_periods = MonthPeriod.all_from(first_period)
periods = MonthPeriod.all_from(perioda, periodb)
context.update({
'all_periods': [(p.strid(), p) for p in reversed(all_periods)],
'periods': periods,
'perioda': perioda,
'periodb': periodb,
'base_url': get_base_url_for_periods(
view_name='cataract_dashboard',
entity=entity,
perioda_str=perioda_str or perioda.strid(),
periodb_str=periodb_str or periodb.strid())
})
context.update(entity_browser_context(
root=root, selected_entity=entity,
full_lineage=['country', 'health_region', 'health_district'],
cluster=cluster))
# retrieve Indicator Table
from snisi_cataract.indicators import (MissionDataSummary,
CumulativeSurgeryData)
missions_followup = MissionDataSummary(entity=entity,
periods=periods)
cumulative_surgeries = CumulativeSurgeryData(entity=entity,
periods=periods)
context.update({
'missions_followup': missions_followup,
'cumulative_surgeries': cumulative_surgeries,
})
return render(request,
kwargs.get('template_name', 'cataract/dashboard.html'),
context)
| {
"content_hash": "022a74272dc0e6f63f0722db943d42f9",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 32.80099502487562,
"alnum_prop": 0.6103443045654482,
"repo_name": "yeleman/snisi",
"id": "d030ad11b454947423a67df0f732fc2f1c84029f",
"size": "6677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snisi_cataract/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "410022"
},
{
"name": "HTML",
"bytes": "1007275"
},
{
"name": "Java",
"bytes": "7211"
},
{
"name": "JavaScript",
"bytes": "292583"
},
{
"name": "Python",
"bytes": "2237855"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
} |
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_pdq', [dirname(__file__)])
except ImportError:
import _pdq
return _pdq
if fp is not None:
try:
_mod = imp.load_module('_pdq', fp, pathname, description)
finally:
fp.close()
return _mod
_pdq = swig_import_helper()
del swig_import_helper
else:
import _pdq
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
TRUE = _pdq.TRUE
FALSE = _pdq.FALSE
MAXNODES = _pdq.MAXNODES
MAXBUF = _pdq.MAXBUF
MAXSTREAMS = _pdq.MAXSTREAMS
MAXCHARS = _pdq.MAXCHARS
VOID = _pdq.VOID
OPEN = _pdq.OPEN
CLOSED = _pdq.CLOSED
MEM = _pdq.MEM
CEN = _pdq.CEN
DLY = _pdq.DLY
MSQ = _pdq.MSQ
ISRV = _pdq.ISRV
FCFS = _pdq.FCFS
PSHR = _pdq.PSHR
LCFS = _pdq.LCFS
TERM = _pdq.TERM
TRANS = _pdq.TRANS
BATCH = _pdq.BATCH
EXACT = _pdq.EXACT
APPROX = _pdq.APPROX
CANON = _pdq.CANON
VISITS = _pdq.VISITS
DEMAND = _pdq.DEMAND
PDQ_SP = _pdq.PDQ_SP
PDQ_MP = _pdq.PDQ_MP
TOL = _pdq.TOL
class SYSTAT_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SYSTAT_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SYSTAT_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["response"] = _pdq.SYSTAT_TYPE_response_set
__swig_getmethods__["response"] = _pdq.SYSTAT_TYPE_response_get
if _newclass:response = _swig_property(_pdq.SYSTAT_TYPE_response_get, _pdq.SYSTAT_TYPE_response_set)
__swig_setmethods__["thruput"] = _pdq.SYSTAT_TYPE_thruput_set
__swig_getmethods__["thruput"] = _pdq.SYSTAT_TYPE_thruput_get
if _newclass:thruput = _swig_property(_pdq.SYSTAT_TYPE_thruput_get, _pdq.SYSTAT_TYPE_thruput_set)
__swig_setmethods__["residency"] = _pdq.SYSTAT_TYPE_residency_set
__swig_getmethods__["residency"] = _pdq.SYSTAT_TYPE_residency_get
if _newclass:residency = _swig_property(_pdq.SYSTAT_TYPE_residency_get, _pdq.SYSTAT_TYPE_residency_set)
__swig_setmethods__["physmem"] = _pdq.SYSTAT_TYPE_physmem_set
__swig_getmethods__["physmem"] = _pdq.SYSTAT_TYPE_physmem_get
if _newclass:physmem = _swig_property(_pdq.SYSTAT_TYPE_physmem_get, _pdq.SYSTAT_TYPE_physmem_set)
__swig_setmethods__["highwater"] = _pdq.SYSTAT_TYPE_highwater_set
__swig_getmethods__["highwater"] = _pdq.SYSTAT_TYPE_highwater_get
if _newclass:highwater = _swig_property(_pdq.SYSTAT_TYPE_highwater_get, _pdq.SYSTAT_TYPE_highwater_set)
__swig_setmethods__["malloc"] = _pdq.SYSTAT_TYPE_malloc_set
__swig_getmethods__["malloc"] = _pdq.SYSTAT_TYPE_malloc_get
if _newclass:malloc = _swig_property(_pdq.SYSTAT_TYPE_malloc_get, _pdq.SYSTAT_TYPE_malloc_set)
__swig_setmethods__["mpl"] = _pdq.SYSTAT_TYPE_mpl_set
__swig_getmethods__["mpl"] = _pdq.SYSTAT_TYPE_mpl_get
if _newclass:mpl = _swig_property(_pdq.SYSTAT_TYPE_mpl_get, _pdq.SYSTAT_TYPE_mpl_set)
__swig_setmethods__["maxN"] = _pdq.SYSTAT_TYPE_maxN_set
__swig_getmethods__["maxN"] = _pdq.SYSTAT_TYPE_maxN_get
if _newclass:maxN = _swig_property(_pdq.SYSTAT_TYPE_maxN_get, _pdq.SYSTAT_TYPE_maxN_set)
__swig_setmethods__["maxTP"] = _pdq.SYSTAT_TYPE_maxTP_set
__swig_getmethods__["maxTP"] = _pdq.SYSTAT_TYPE_maxTP_get
if _newclass:maxTP = _swig_property(_pdq.SYSTAT_TYPE_maxTP_get, _pdq.SYSTAT_TYPE_maxTP_set)
__swig_setmethods__["minRT"] = _pdq.SYSTAT_TYPE_minRT_set
__swig_getmethods__["minRT"] = _pdq.SYSTAT_TYPE_minRT_get
if _newclass:minRT = _swig_property(_pdq.SYSTAT_TYPE_minRT_get, _pdq.SYSTAT_TYPE_minRT_set)
def __init__(self):
this = _pdq.new_SYSTAT_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_SYSTAT_TYPE
__del__ = lambda self : None;
SYSTAT_TYPE_swigregister = _pdq.SYSTAT_TYPE_swigregister
SYSTAT_TYPE_swigregister(SYSTAT_TYPE)
cvar = _pdq.cvar
class TERMINAL_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TERMINAL_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TERMINAL_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _pdq.TERMINAL_TYPE_name_set
__swig_getmethods__["name"] = _pdq.TERMINAL_TYPE_name_get
if _newclass:name = _swig_property(_pdq.TERMINAL_TYPE_name_get, _pdq.TERMINAL_TYPE_name_set)
__swig_setmethods__["pop"] = _pdq.TERMINAL_TYPE_pop_set
__swig_getmethods__["pop"] = _pdq.TERMINAL_TYPE_pop_get
if _newclass:pop = _swig_property(_pdq.TERMINAL_TYPE_pop_get, _pdq.TERMINAL_TYPE_pop_set)
__swig_setmethods__["think"] = _pdq.TERMINAL_TYPE_think_set
__swig_getmethods__["think"] = _pdq.TERMINAL_TYPE_think_get
if _newclass:think = _swig_property(_pdq.TERMINAL_TYPE_think_get, _pdq.TERMINAL_TYPE_think_set)
__swig_setmethods__["sys"] = _pdq.TERMINAL_TYPE_sys_set
__swig_getmethods__["sys"] = _pdq.TERMINAL_TYPE_sys_get
if _newclass:sys = _swig_property(_pdq.TERMINAL_TYPE_sys_get, _pdq.TERMINAL_TYPE_sys_set)
def __init__(self):
this = _pdq.new_TERMINAL_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_TERMINAL_TYPE
__del__ = lambda self : None;
TERMINAL_TYPE_swigregister = _pdq.TERMINAL_TYPE_swigregister
TERMINAL_TYPE_swigregister(TERMINAL_TYPE)
class BATCH_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BATCH_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BATCH_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _pdq.BATCH_TYPE_name_set
__swig_getmethods__["name"] = _pdq.BATCH_TYPE_name_get
if _newclass:name = _swig_property(_pdq.BATCH_TYPE_name_get, _pdq.BATCH_TYPE_name_set)
__swig_setmethods__["pop"] = _pdq.BATCH_TYPE_pop_set
__swig_getmethods__["pop"] = _pdq.BATCH_TYPE_pop_get
if _newclass:pop = _swig_property(_pdq.BATCH_TYPE_pop_get, _pdq.BATCH_TYPE_pop_set)
__swig_setmethods__["sys"] = _pdq.BATCH_TYPE_sys_set
__swig_getmethods__["sys"] = _pdq.BATCH_TYPE_sys_get
if _newclass:sys = _swig_property(_pdq.BATCH_TYPE_sys_get, _pdq.BATCH_TYPE_sys_set)
def __init__(self):
this = _pdq.new_BATCH_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_BATCH_TYPE
__del__ = lambda self : None;
BATCH_TYPE_swigregister = _pdq.BATCH_TYPE_swigregister
BATCH_TYPE_swigregister(BATCH_TYPE)
class TRANSACTION_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TRANSACTION_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TRANSACTION_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _pdq.TRANSACTION_TYPE_name_set
__swig_getmethods__["name"] = _pdq.TRANSACTION_TYPE_name_get
if _newclass:name = _swig_property(_pdq.TRANSACTION_TYPE_name_get, _pdq.TRANSACTION_TYPE_name_set)
__swig_setmethods__["arrival_rate"] = _pdq.TRANSACTION_TYPE_arrival_rate_set
__swig_getmethods__["arrival_rate"] = _pdq.TRANSACTION_TYPE_arrival_rate_get
if _newclass:arrival_rate = _swig_property(_pdq.TRANSACTION_TYPE_arrival_rate_get, _pdq.TRANSACTION_TYPE_arrival_rate_set)
__swig_setmethods__["saturation_rate"] = _pdq.TRANSACTION_TYPE_saturation_rate_set
__swig_getmethods__["saturation_rate"] = _pdq.TRANSACTION_TYPE_saturation_rate_get
if _newclass:saturation_rate = _swig_property(_pdq.TRANSACTION_TYPE_saturation_rate_get, _pdq.TRANSACTION_TYPE_saturation_rate_set)
__swig_setmethods__["sys"] = _pdq.TRANSACTION_TYPE_sys_set
__swig_getmethods__["sys"] = _pdq.TRANSACTION_TYPE_sys_get
if _newclass:sys = _swig_property(_pdq.TRANSACTION_TYPE_sys_get, _pdq.TRANSACTION_TYPE_sys_set)
def __init__(self):
this = _pdq.new_TRANSACTION_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_TRANSACTION_TYPE
__del__ = lambda self : None;
TRANSACTION_TYPE_swigregister = _pdq.TRANSACTION_TYPE_swigregister
TRANSACTION_TYPE_swigregister(TRANSACTION_TYPE)
class JOB_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, JOB_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, JOB_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["should_be_class"] = _pdq.JOB_TYPE_should_be_class_set
__swig_getmethods__["should_be_class"] = _pdq.JOB_TYPE_should_be_class_get
if _newclass:should_be_class = _swig_property(_pdq.JOB_TYPE_should_be_class_get, _pdq.JOB_TYPE_should_be_class_set)
__swig_setmethods__["network"] = _pdq.JOB_TYPE_network_set
__swig_getmethods__["network"] = _pdq.JOB_TYPE_network_get
if _newclass:network = _swig_property(_pdq.JOB_TYPE_network_get, _pdq.JOB_TYPE_network_set)
__swig_setmethods__["term"] = _pdq.JOB_TYPE_term_set
__swig_getmethods__["term"] = _pdq.JOB_TYPE_term_get
if _newclass:term = _swig_property(_pdq.JOB_TYPE_term_get, _pdq.JOB_TYPE_term_set)
__swig_setmethods__["batch"] = _pdq.JOB_TYPE_batch_set
__swig_getmethods__["batch"] = _pdq.JOB_TYPE_batch_get
if _newclass:batch = _swig_property(_pdq.JOB_TYPE_batch_get, _pdq.JOB_TYPE_batch_set)
__swig_setmethods__["trans"] = _pdq.JOB_TYPE_trans_set
__swig_getmethods__["trans"] = _pdq.JOB_TYPE_trans_get
if _newclass:trans = _swig_property(_pdq.JOB_TYPE_trans_get, _pdq.JOB_TYPE_trans_set)
def __init__(self):
this = _pdq.new_JOB_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_JOB_TYPE
__del__ = lambda self : None;
JOB_TYPE_swigregister = _pdq.JOB_TYPE_swigregister
JOB_TYPE_swigregister(JOB_TYPE)
class NODE_TYPE(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, NODE_TYPE, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, NODE_TYPE, name)
__repr__ = _swig_repr
__swig_setmethods__["devtype"] = _pdq.NODE_TYPE_devtype_set
__swig_getmethods__["devtype"] = _pdq.NODE_TYPE_devtype_get
if _newclass:devtype = _swig_property(_pdq.NODE_TYPE_devtype_get, _pdq.NODE_TYPE_devtype_set)
__swig_setmethods__["sched"] = _pdq.NODE_TYPE_sched_set
__swig_getmethods__["sched"] = _pdq.NODE_TYPE_sched_get
if _newclass:sched = _swig_property(_pdq.NODE_TYPE_sched_get, _pdq.NODE_TYPE_sched_set)
__swig_setmethods__["devname"] = _pdq.NODE_TYPE_devname_set
__swig_getmethods__["devname"] = _pdq.NODE_TYPE_devname_get
if _newclass:devname = _swig_property(_pdq.NODE_TYPE_devname_get, _pdq.NODE_TYPE_devname_set)
__swig_setmethods__["visits"] = _pdq.NODE_TYPE_visits_set
__swig_getmethods__["visits"] = _pdq.NODE_TYPE_visits_get
if _newclass:visits = _swig_property(_pdq.NODE_TYPE_visits_get, _pdq.NODE_TYPE_visits_set)
__swig_setmethods__["service"] = _pdq.NODE_TYPE_service_set
__swig_getmethods__["service"] = _pdq.NODE_TYPE_service_get
if _newclass:service = _swig_property(_pdq.NODE_TYPE_service_get, _pdq.NODE_TYPE_service_set)
__swig_setmethods__["demand"] = _pdq.NODE_TYPE_demand_set
__swig_getmethods__["demand"] = _pdq.NODE_TYPE_demand_get
if _newclass:demand = _swig_property(_pdq.NODE_TYPE_demand_get, _pdq.NODE_TYPE_demand_set)
__swig_setmethods__["resit"] = _pdq.NODE_TYPE_resit_set
__swig_getmethods__["resit"] = _pdq.NODE_TYPE_resit_get
if _newclass:resit = _swig_property(_pdq.NODE_TYPE_resit_get, _pdq.NODE_TYPE_resit_set)
__swig_setmethods__["utiliz"] = _pdq.NODE_TYPE_utiliz_set
__swig_getmethods__["utiliz"] = _pdq.NODE_TYPE_utiliz_get
if _newclass:utiliz = _swig_property(_pdq.NODE_TYPE_utiliz_get, _pdq.NODE_TYPE_utiliz_set)
__swig_setmethods__["qsize"] = _pdq.NODE_TYPE_qsize_set
__swig_getmethods__["qsize"] = _pdq.NODE_TYPE_qsize_get
if _newclass:qsize = _swig_property(_pdq.NODE_TYPE_qsize_get, _pdq.NODE_TYPE_qsize_set)
__swig_setmethods__["avqsize"] = _pdq.NODE_TYPE_avqsize_set
__swig_getmethods__["avqsize"] = _pdq.NODE_TYPE_avqsize_get
if _newclass:avqsize = _swig_property(_pdq.NODE_TYPE_avqsize_get, _pdq.NODE_TYPE_avqsize_set)
def __init__(self):
this = _pdq.new_NODE_TYPE()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pdq.delete_NODE_TYPE
__del__ = lambda self : None;
NODE_TYPE_swigregister = _pdq.NODE_TYPE_swigregister
NODE_TYPE_swigregister(NODE_TYPE)
def CreateClosed(*args):
return _pdq.CreateClosed(*args)
CreateClosed = _pdq.CreateClosed
def CreateClosed_p(*args):
return _pdq.CreateClosed_p(*args)
CreateClosed_p = _pdq.CreateClosed_p
def CreateOpen(*args):
return _pdq.CreateOpen(*args)
CreateOpen = _pdq.CreateOpen
def CreateOpen_p(*args):
return _pdq.CreateOpen_p(*args)
CreateOpen_p = _pdq.CreateOpen_p
def CreateNode(*args):
return _pdq.CreateNode(*args)
CreateNode = _pdq.CreateNode
def CreateMultiNode(*args):
return _pdq.CreateMultiNode(*args)
CreateMultiNode = _pdq.CreateMultiNode
def GetStreamsCount():
return _pdq.GetStreamsCount()
GetStreamsCount = _pdq.GetStreamsCount
def GetNodesCount():
return _pdq.GetNodesCount()
GetNodesCount = _pdq.GetNodesCount
def GetResponse(*args):
return _pdq.GetResponse(*args)
GetResponse = _pdq.GetResponse
def GetResidenceTime(*args):
return _pdq.GetResidenceTime(*args)
GetResidenceTime = _pdq.GetResidenceTime
def GetThruput(*args):
return _pdq.GetThruput(*args)
GetThruput = _pdq.GetThruput
def GetLoadOpt(*args):
return _pdq.GetLoadOpt(*args)
GetLoadOpt = _pdq.GetLoadOpt
def GetUtilization(*args):
return _pdq.GetUtilization(*args)
GetUtilization = _pdq.GetUtilization
def GetQueueLength(*args):
return _pdq.GetQueueLength(*args)
GetQueueLength = _pdq.GetQueueLength
def PDQ_GetThruMax(*args):
return _pdq.PDQ_GetThruMax(*args)
PDQ_GetThruMax = _pdq.PDQ_GetThruMax
def Init(*args):
return _pdq.Init(*args)
Init = _pdq.Init
def Report():
return _pdq.Report()
Report = _pdq.Report
def SetDebug(*args):
return _pdq.SetDebug(*args)
SetDebug = _pdq.SetDebug
def SetDemand(*args):
return _pdq.SetDemand(*args)
SetDemand = _pdq.SetDemand
def SetDemand_p(*args):
return _pdq.SetDemand_p(*args)
SetDemand_p = _pdq.SetDemand_p
def SetVisits(*args):
return _pdq.SetVisits(*args)
SetVisits = _pdq.SetVisits
def SetVisits_p(*args):
return _pdq.SetVisits_p(*args)
SetVisits_p = _pdq.SetVisits_p
def Solve(*args):
return _pdq.Solve(*args)
Solve = _pdq.Solve
def SetWUnit(*args):
return _pdq.SetWUnit(*args)
SetWUnit = _pdq.SetWUnit
def SetTUnit(*args):
return _pdq.SetTUnit(*args)
SetTUnit = _pdq.SetTUnit
def SetComment(*args):
return _pdq.SetComment(*args)
SetComment = _pdq.SetComment
def GetComment():
return _pdq.GetComment()
GetComment = _pdq.GetComment
def PrintNodes():
return _pdq.PrintNodes()
PrintNodes = _pdq.PrintNodes
def GetNode(*args):
return _pdq.GetNode(*args)
GetNode = _pdq.GetNode
def getjob(*args):
return _pdq.getjob(*args)
getjob = _pdq.getjob
def resets(*args):
return _pdq.resets(*args)
resets = _pdq.resets
def debug(*args):
return _pdq.debug(*args)
debug = _pdq.debug
def errmsg(*args):
return _pdq.errmsg(*args)
errmsg = _pdq.errmsg
def approx():
return _pdq.approx()
approx = _pdq.approx
def canonical():
return _pdq.canonical()
canonical = _pdq.canonical
def exact():
return _pdq.exact()
exact = _pdq.exact
def getjob_index(*args):
return _pdq.getjob_index(*args)
getjob_index = _pdq.getjob_index
def getjob_name(*args):
return _pdq.getjob_name(*args)
getjob_name = _pdq.getjob_name
def getnode_index(*args):
return _pdq.getnode_index(*args)
getnode_index = _pdq.getnode_index
def typetostr(*args):
return _pdq.typetostr(*args)
typetostr = _pdq.typetostr
# This file is compatible with both classic and new-style classes.
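# A short usage sketch appended for illustration; it is not part of the
# generated SWIG wrapper. It follows the conventional PDQ calling sequence
# (Init -> CreateOpen/CreateNode -> SetDemand -> Solve -> Report); the arrival
# rate and service demand are made-up numbers for a single open M/M/1 queue.
if __name__ == '__main__':
    Init("example_open_circuit")
    CreateOpen("work", 0.75)           # 0.75 requests arriving per second
    CreateNode("server", CEN, FCFS)    # one queueing center with FIFO service
    SetDemand("server", "work", 1.0)   # each request needs 1.0 s of service
    Solve(CANON)
    Report()                           # print the standard PDQ report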
| {
"content_hash": "93ad7dc48a4304779629fa19472d28cf",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 135,
"avg_line_length": 38.70509977827051,
"alnum_prop": 0.6701993583868011,
"repo_name": "evelynmitchell/pdq",
"id": "838cec04176a70b973d492923729673eff3615c6",
"size": "17664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pdq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "698982"
},
{
"name": "Java",
"bytes": "169184"
},
{
"name": "Makefile",
"bytes": "8036"
},
{
"name": "PHP",
"bytes": "252407"
},
{
"name": "Perl",
"bytes": "17066"
},
{
"name": "Python",
"bytes": "22975"
},
{
"name": "R",
"bytes": "151016"
},
{
"name": "Rebol",
"bytes": "1610"
},
{
"name": "Shell",
"bytes": "5565"
}
],
"symlink_target": ""
} |
import requests
import sys
import json
URL = "http://openpayments.us/data?query=%s"
query = sys.argv[1]
r = requests.get(URL % query)
jsondata = r.text
# print(jsondata)  # raw JSON
data = json.loads(jsondata) # dictionary version
print(json.dumps(data, indent=4))
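# Example invocation (illustrative): ``python search.py lipitor``. After
# json.loads(), ``data`` is a plain Python structure (typically a list of
# records), so it can be iterated directly; the field names depend on the
# service, e.g.:
#
#     for record in data[:5]:
#         print(record)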
| {
"content_hash": "f72c37d92577a9a38dfba798fb96d683",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 60,
"avg_line_length": 19.0625,
"alnum_prop": 0.6327868852459017,
"repo_name": "parrt/msan692",
"id": "05460565336dde993759b0499a3793440eb580c0",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notes/code/openpayments/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11717032"
},
{
"name": "Jupyter Notebook",
"bytes": "264133"
},
{
"name": "Python",
"bytes": "79564"
},
{
"name": "Shell",
"bytes": "2500"
},
{
"name": "TeX",
"bytes": "11367"
}
],
"symlink_target": ""
} |
"""
This module handles sending static content such as in-memory data or
files. At this time it has cache helpers and understands the
if-modified-since request header.
"""
import os, time, mimetypes, zipfile, tarfile
from paste.httpexceptions import *
from paste.httpheaders import *
CACHE_SIZE = 4096
BLOCK_SIZE = 4096 * 16
__all__ = ['DataApp', 'FileApp', 'ArchiveStore']
class DataApp(object):
"""
Returns an application that will send content in a single chunk,
this application has support for setting cache-control and for
responding to conditional (or HEAD) requests.
Constructor Arguments:
``content`` the content being sent to the client
``headers`` the headers to send /w the response
The remaining ``kwargs`` correspond to headers, where the
underscore is replaced with a dash. These values are only
added to the headers if they are not already provided; thus,
they can be used for default values. Examples include, but
are not limited to:
``content_type``
``content_encoding``
``content_location``
``cache_control()``
This method provides validated construction of the ``Cache-Control``
header as well as providing for automated filling out of the
``EXPIRES`` header for HTTP/1.0 clients.
``set_content()``
This method provides a mechanism to set the content after the
application has been constructed. This method does things
like changing ``Last-Modified`` and ``Content-Length`` headers.
"""
allowed_methods = ('GET', 'HEAD')
def __init__(self, content, headers=None, allowed_methods=None,
**kwargs):
assert isinstance(headers, (type(None), list))
self.expires = None
self.content = None
self.content_length = None
self.last_modified = 0
if allowed_methods is not None:
self.allowed_methods = allowed_methods
self.headers = headers or []
for (k, v) in kwargs.items():
header = get_header(k)
header.update(self.headers, v)
ACCEPT_RANGES.update(self.headers, bytes=True)
if not CONTENT_TYPE(self.headers):
CONTENT_TYPE.update(self.headers)
if content is not None:
self.set_content(content)
def cache_control(self, **kwargs):
self.expires = CACHE_CONTROL.apply(self.headers, **kwargs) or None
return self
def set_content(self, content, last_modified=None):
assert content is not None
if last_modified is None:
self.last_modified = time.time()
else:
self.last_modified = last_modified
self.content = content
self.content_length = len(content)
LAST_MODIFIED.update(self.headers, time=self.last_modified)
return self
def content_disposition(self, **kwargs):
CONTENT_DISPOSITION.apply(self.headers, **kwargs)
return self
def __call__(self, environ, start_response):
method = environ['REQUEST_METHOD'].upper()
if method not in self.allowed_methods:
exc = HTTPMethodNotAllowed(
'You cannot %s a file' % method,
headers=[('Allow', ','.join(self.allowed_methods))])
return exc(environ, start_response)
return self.get(environ, start_response)
def calculate_etag(self):
return str(self.last_modified) + '-' + str(self.content_length)
def get(self, environ, start_response):
headers = self.headers[:]
current_etag = self.calculate_etag()
ETAG.update(headers, current_etag)
if self.expires is not None:
EXPIRES.update(headers, delta=self.expires)
try:
client_etags = IF_NONE_MATCH.parse(environ)
if client_etags:
for etag in client_etags:
if etag == current_etag or etag == '*':
# horribly inefficient, n^2 performance, yuck!
for head in list_headers(entity=True):
head.delete(headers)
start_response('304 Not Modified', headers)
return ['']
except HTTPBadRequest, exce:
return exce.wsgi_application(environ, start_response)
# If we get If-None-Match and If-Modified-Since, and
# If-None-Match doesn't match, then we should not try to
# figure out If-Modified-Since (which has 1-second granularity
# and just isn't as accurate)
if not client_etags:
try:
client_clock = IF_MODIFIED_SINCE.parse(environ)
if client_clock >= int(self.last_modified):
# horribly inefficient, n^2 performance, yuck!
for head in list_headers(entity=True):
head.delete(headers)
start_response('304 Not Modified', headers)
return [''] # empty body
except HTTPBadRequest, exce:
return exce.wsgi_application(environ, start_response)
(lower, upper) = (0, self.content_length - 1)
range = RANGE.parse(environ)
if range and 'bytes' == range[0] and 1 == len(range[1]):
(lower, upper) = range[1][0]
upper = upper or (self.content_length - 1)
if upper >= self.content_length or lower > upper:
return HTTPRequestRangeNotSatisfiable((
"Range request was made beyond the end of the content,\r\n"
"which is %s long.\r\n Range: %s\r\n") % (
self.content_length, RANGE(environ))
).wsgi_application(environ, start_response)
content_length = upper - lower + 1
CONTENT_RANGE.update(headers, first_byte=lower, last_byte=upper,
total_length = self.content_length)
CONTENT_LENGTH.update(headers, content_length)
if content_length == self.content_length:
start_response('200 OK', headers)
else:
start_response('206 Partial Content', headers)
if self.content is not None:
return [self.content[lower:upper+1]]
return (lower, content_length)
class FileApp(DataApp):
"""
Returns an application that will send the file at the given
filename. Adds a mime type based on ``mimetypes.guess_type()``.
See DataApp for the arguments beyond ``filename``.
"""
def __init__(self, filename, headers=None, **kwargs):
self.filename = filename
content_type, content_encoding = self.guess_type()
if content_type and 'content_type' not in kwargs:
kwargs['content_type'] = content_type
if content_encoding and 'content_encoding' not in kwargs:
kwargs['content_encoding'] = content_encoding
DataApp.__init__(self, None, headers, **kwargs)
def guess_type(self):
return mimetypes.guess_type(self.filename)
def update(self, force=False):
stat = os.stat(self.filename)
if not force and stat.st_mtime == self.last_modified:
return
self.last_modified = stat.st_mtime
if stat.st_size < CACHE_SIZE:
fh = open(self.filename,"rb")
self.set_content(fh.read(), stat.st_mtime)
fh.close()
else:
self.content = None
self.content_length = stat.st_size
# This is updated automatically if self.set_content() is
# called
LAST_MODIFIED.update(self.headers, time=self.last_modified)
def get(self, environ, start_response):
is_head = environ['REQUEST_METHOD'].upper() == 'HEAD'
if 'max-age=0' in CACHE_CONTROL(environ).lower():
self.update(force=True) # RFC 2616 13.2.6
else:
self.update()
if not self.content:
if not os.path.exists(self.filename):
exc = HTTPNotFound(
'The resource does not exist',
comment="No file at %r" % self.filename)
return exc(environ, start_response)
try:
file = open(self.filename, 'rb')
except (IOError, OSError), e:
exc = HTTPForbidden(
'You are not permitted to view this file (%s)' % e)
return exc.wsgi_application(
environ, start_response)
retval = DataApp.get(self, environ, start_response)
if isinstance(retval, list):
# cached content, exception, or not-modified
if is_head:
return ['']
return retval
(lower, content_length) = retval
if is_head:
return ['']
file.seek(lower)
return _FileIter(file, size=content_length)
class _FileIter(object):
def __init__(self, file, block_size=None, size=None):
self.file = file
self.size = size
self.block_size = block_size or BLOCK_SIZE
def __iter__(self):
return self
def next(self):
chunk_size = self.block_size
if self.size is not None:
if chunk_size > self.size:
chunk_size = self.size
self.size -= chunk_size
data = self.file.read(chunk_size)
if not data:
raise StopIteration
return data
def close(self):
self.file.close()
class ArchiveStore(object):
"""
Returns an application that serves up a DataApp for items requested
in a given zip or tar archive.
Constructor Arguments:
``filepath`` the path to the archive being served
``cache_control()``
This method provides validated construction of the ``Cache-Control``
header as well as providing for automated filling out of the
``EXPIRES`` header for HTTP/1.0 clients.
"""
def __init__(self, filepath):
if zipfile.is_zipfile(filepath):
self.archive = zipfile.ZipFile(filepath,"r")
elif tarfile.is_tarfile(filepath):
self.archive = tarfile.TarFileCompat(filepath,"r")
else:
raise AssertionError("filepath '%s' is not a zip or tar " % filepath)
self.expires = None
self.last_modified = time.time()
self.cache = {}
def cache_control(self, **kwargs):
self.expires = CACHE_CONTROL.apply(self.headers, **kwargs) or None
return self
def __call__(self, environ, start_response):
path = environ.get("PATH_INFO","")
if path.startswith("/"):
path = path[1:]
application = self.cache.get(path)
if application:
return application(environ, start_response)
try:
info = self.archive.getinfo(path)
except KeyError:
exc = HTTPNotFound("The file requested, '%s', was not found." % path)
return exc.wsgi_application(environ, start_response)
if info.filename.endswith("/"):
exc = HTTPNotFound("Path requested, '%s', is not a file." % path)
return exc.wsgi_application(environ, start_response)
content_type, content_encoding = mimetypes.guess_type(info.filename)
app = DataApp(None, content_type = content_type,
content_encoding = content_encoding)
app.set_content(self.archive.read(path),
time.mktime(info.date_time + (0,0,0)))
self.cache[path] = app
app.expires = self.expires
return app(environ, start_response)
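# A quick way to exercise these applications, not part of the original module:
# serve a single file over HTTP with Paste's bundled server. The filename and
# port below are placeholders.
if __name__ == '__main__':
    from paste import httpserver
    app = FileApp('example.tar.gz').cache_control(max_age=3600)
    httpserver.serve(app, host='127.0.0.1', port='8080')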
| {
"content_hash": "c5b58f9902bae6e7d2b0715c21be86fb",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 81,
"avg_line_length": 37.91883116883117,
"alnum_prop": 0.5854097097354225,
"repo_name": "santisiri/popego",
"id": "e72db7f08691e3d9242a0a8301872b78a57b86ec",
"size": "12046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/Paste-1.4.2-py2.5.egg/paste/fileapp.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
from typing import Optional
from tableauserverclient import WorkbookItem
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.tableau.hooks.tableau import TableauHook
class TableauRefreshWorkbookOperator(BaseOperator):
"""
Refreshes a Tableau Workbook/Extract
.. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#workbooks
:param workbook_name: The name of the workbook to refresh.
:type workbook_name: str
:param site_id: The id of the site where the workbook belongs to.
:type site_id: Optional[str]
    :param blocking: By default the extract refresh is blocking, meaning it will wait until it has finished.
:type blocking: bool
:param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
containing the credentials to authenticate to the Tableau Server.
:type tableau_conn_id: str
"""
def __init__(
self,
*,
workbook_name: str,
site_id: Optional[str] = None,
blocking: bool = True,
tableau_conn_id: str = 'tableau_default',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.workbook_name = workbook_name
self.site_id = site_id
self.blocking = blocking
self.tableau_conn_id = tableau_conn_id
def execute(self, context: dict) -> str:
"""
Executes the Tableau Extract Refresh and pushes the job id to xcom.
:param context: The task context during execution.
:type context: dict
:return: the id of the job that executes the extract refresh
:rtype: str
"""
with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
workbook = self._get_workbook_by_name(tableau_hook)
job_id = self._refresh_workbook(tableau_hook, workbook.id)
if self.blocking:
from airflow.providers.tableau.sensors.tableau_job_status import TableauJobStatusSensor
TableauJobStatusSensor(
job_id=job_id,
site_id=self.site_id,
tableau_conn_id=self.tableau_conn_id,
task_id='wait_until_succeeded',
dag=None,
).execute(context={})
self.log.info('Workbook %s has been successfully refreshed.', self.workbook_name)
return job_id
def _get_workbook_by_name(self, tableau_hook: TableauHook) -> WorkbookItem:
for workbook in tableau_hook.get_all(resource_name='workbooks'):
if workbook.name == self.workbook_name:
self.log.info('Found matching workbook with id %s', workbook.id)
return workbook
raise AirflowException(f'Workbook {self.workbook_name} not found!')
def _refresh_workbook(self, tableau_hook: TableauHook, workbook_id: str) -> str:
job = tableau_hook.server.workbooks.refresh(workbook_id)
self.log.info('Refreshing Workbook %s...', self.workbook_name)
return job.id
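# Illustrative DAG snippet, not part of this module: how the operator is
# typically wired into a DAG. The DAG id, schedule and workbook name are
# placeholders.
#
#     from airflow import DAG
#     from airflow.utils.dates import days_ago
#
#     with DAG(dag_id='refresh_sales_workbook', start_date=days_ago(1),
#              schedule_interval='@daily') as dag:
#         TableauRefreshWorkbookOperator(
#             task_id='refresh_workbook',
#             workbook_name='Sales Dashboard',
#             blocking=True,
#             tableau_conn_id='tableau_default',
#         )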
| {
"content_hash": "b3f8352c9f9fe5a52848c3b36f8096ba",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 110,
"avg_line_length": 39.18987341772152,
"alnum_prop": 0.6385658914728682,
"repo_name": "sekikn/incubator-airflow",
"id": "cca01e7b451d0b64cef1e19d88ec0d71b97fee4c",
"size": "3881",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/tableau/operators/tableau_refresh_workbook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""Functions to plot epochs data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
# Jona Sassenhagen <[email protected]>
# Stefan Repplinger <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from collections import Counter
from functools import partial
from copy import deepcopy
import warnings
import numpy as np
from ..defaults import _handle_default
from ..utils import verbose, logger, warn, fill_doc, check_version
from ..io.meas_info import create_info, _validate_type
from ..io.pick import (pick_types, channel_type, _get_channel_types,
_picks_to_idx, _DATA_CH_TYPES_SPLIT,
_DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES)
from ..time_frequency import psd_multitaper
from .utils import (tight_layout, figure_nobar, _toggle_proj, _toggle_options,
_prepare_mne_browse, _setup_vmin_vmax, _channels_changed,
_plot_raw_onscroll, _onclick_help, plt_show, _check_cov,
_compute_scalings, DraggableColorbar, _setup_cmap,
_handle_decim, _setup_plot_projector, _set_ax_label_style,
_set_title_multiple_electrodes, _make_combine_callable,
_get_figsize_from_config, _toggle_scrollbars,
_check_psd_fmax, _set_window_title)
from .misc import _handle_event_colors
@fill_doc
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap=None, fig=None,
axes=None, overlay_times=None, combine=None,
group_by=None, evoked=True, ts_args=None, title=None,
clear=False):
"""Plot Event Related Potential / Fields image.
Parameters
----------
epochs : instance of Epochs
The epochs.
%(picks_good_data)s
``picks`` interacts with ``group_by`` and ``combine`` to determine the
number of figures generated; see Notes.
sigma : float
The standard deviation of a Gaussian smoothing window applied along
the epochs axis of the image. If 0, no smoothing is applied.
Defaults to 0.
vmin : None | float | callable
The min value in the image (and the ER[P/F]). The unit is µV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
If vmin is None and multiple plots are returned, the limit is
equalized within channel types.
Hint: to specify the lower limit of the data, use
``vmin=lambda data: data.min()``.
vmax : None | float | callable
The max value in the image (and the ER[P/F]). The unit is µV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
        If vmax is None and multiple plots are returned, the limit is
equalized within channel types.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not ``None``, order is used to reorder the epochs along the y-axis
of the image. If it is an array of :class:`int`, its length should
match the number of good epochs. If it is a callable it should accept
two positional parameters (``times`` and ``data``, where
``data.shape == (len(good_epochs), len(times))``) and return an
:class:`array <numpy.ndarray>` of indices that will sort ``data`` along
its first axis.
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``units=dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)``.
cmap : None | colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True).
If None, "RdBu_r" is used, unless the data is all positive, in which
case "Reds" is used.
fig : Figure | None
:class:`~matplotlib.figure.Figure` instance to draw the image to.
Figure must contain the correct number of axes for drawing the epochs
image, the evoked response, and a colorbar (depending on values of
``evoked`` and ``colorbar``). If ``None`` a new figure is created.
Defaults to ``None``.
axes : list of Axes | dict of list of Axes | None
List of :class:`~matplotlib.axes.Axes` objects in which to draw the
image, evoked response, and colorbar (in that order). Length of list
must be 1, 2, or 3 (depending on values of ``colorbar`` and ``evoked``
parameters). If a :class:`dict`, each entry must be a list of Axes
objects with the same constraints as above. If both ``axes`` and
``group_by`` are dicts, their keys must match. Providing non-``None``
values for both ``fig`` and ``axes`` results in an error. Defaults to
``None``.
overlay_times : array_like, shape (n_epochs,) | None
Times (in seconds) at which to draw a line on the corresponding row of
the image (e.g., a reaction time associated with each epoch). Note that
``overlay_times`` should be ordered to correspond with the
:class:`~mne.Epochs` object (i.e., ``overlay_times[0]`` corresponds to
``epochs[0]``, etc).
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_epochs, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_epochs, n_times)``. For
example::
combine = lambda data: np.median(data, axis=1)
If ``combine`` is ``None``, channels are combined by computing GFP,
unless ``group_by`` is also ``None`` and ``picks`` is a list of
specific channels (not channel types), in which case no combining is
performed and each channel gets its own figure. See Notes for further
details. Defaults to ``None``.
group_by : None | dict
Specifies which channels are aggregated into a single figure, with
aggregation method determined by the ``combine`` parameter. If not
``None``, one :class:`~matplotlib.figure.Figure` is made per dict
entry; the dict key will be used as the figure title and the dict
values must be lists of picks (either channel names or integer indices
of ``epochs.ch_names``). For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
Note that within a dict entry all channels must have the same type.
``group_by`` interacts with ``picks`` and ``combine`` to determine the
number of figures generated; see Notes. Defaults to ``None``.
evoked : bool
Draw the ER[P/F] below the image or not.
ts_args : None | dict
Arguments passed to a call to `~mne.viz.plot_compare_evokeds` to style
the evoked plot below the image. Defaults to an empty dictionary,
meaning `~mne.viz.plot_compare_evokeds` will be called with default
parameters.
title : None | str
If :class:`str`, will be plotted as figure title. Otherwise, the
title will indicate channel(s) or channel type being plotted. Defaults
to ``None``.
clear : bool
Whether to clear the axes before plotting (if ``fig`` or ``axes`` are
provided). Defaults to ``False``.
Returns
-------
figs : list of Figure
One figure per channel, channel type, or group, depending on values of
``picks``, ``group_by``, and ``combine``. See Notes.
Notes
-----
You can control how channels are aggregated into one figure or plotted in
separate figures through a combination of the ``picks``, ``group_by``, and
``combine`` parameters. If ``group_by`` is a :class:`dict`, the result is
one :class:`~matplotlib.figure.Figure` per dictionary key (for any valid
values of ``picks`` and ``combine``). If ``group_by`` is ``None``, the
number and content of the figures generated depends on the values of
``picks`` and ``combine``, as summarized in this table:
.. cssclass:: table-bordered
.. rst-class:: midvalign
+----------+----------------------------+------------+-------------------+
| group_by | picks | combine | result |
+==========+============================+============+===================+
| | None, int, list of int, | None, | |
| dict | ch_name, list of ch_names, | string, or | 1 figure per |
| | ch_type, list of ch_types | callable | dict key |
+----------+----------------------------+------------+-------------------+
| | None, | None, | |
| | ch_type, | string, or | 1 figure per |
| | list of ch_types | callable | ch_type |
| None +----------------------------+------------+-------------------+
| | int, | None | 1 figure per pick |
| | ch_name, +------------+-------------------+
| | list of int, | string or | 1 figure |
| | list of ch_names | callable | |
+----------+----------------------------+------------+-------------------+
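    For example, a typical call (parameter values here are only illustrative)
    that yields one figure per channel type, combining channels within each
    type by GFP::
        figs = plot_epochs_image(epochs, picks=['mag', 'grad'], combine='gfp',
                                 sigma=1., cmap='viridis')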
"""
from scipy.ndimage import gaussian_filter1d
from .. import EpochsArray
_validate_type(group_by, (dict, None), 'group_by')
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
if set(units) != set(scalings):
raise ValueError('Scalings and units must have the same keys.')
# is picks a channel type (or None)?
picks, picked_types = _picks_to_idx(epochs.info, picks, return_kind=True)
ch_types = _get_channel_types(epochs.info, picks)
# `combine` defaults to 'gfp' unless picks are specific channels and
# there was no group_by passed
combine_given = combine is not None
if combine is None and (group_by is not None or picked_types):
combine = 'gfp'
# convert `combine` into callable (if None or str)
combine_func = _make_combine_callable(combine)
# handle ts_args (params for the evoked time series)
ts_args = dict() if ts_args is None else ts_args
manual_ylims = 'ylim' in ts_args
if combine is not None:
ts_args['show_sensors'] = False
vlines = [0] if (epochs.times[0] < 0 < epochs.times[-1]) else []
ts_defaults = dict(colors={'cond': 'k'}, title='', show=False,
truncate_yaxis='auto', truncate_xaxis=False,
vlines=vlines, legend=False)
ts_defaults.update(**ts_args)
ts_args = ts_defaults.copy()
# construct a group_by dict if one wasn't supplied
if group_by is None:
if picked_types:
# one fig per ch_type
group_by = {ch_type: picks[np.array(ch_types) == ch_type]
for ch_type in set(ch_types)
if ch_type in _DATA_CH_TYPES_SPLIT}
elif combine is None:
# one fig per pick
group_by = {epochs.ch_names[pick]: [pick] for pick in picks}
else:
# one fig to rule them all
ch_names = np.array(epochs.ch_names)[picks].tolist()
key = _set_title_multiple_electrodes(None, combine, ch_names)
group_by = {key: picks}
else:
group_by = deepcopy(group_by)
# check for heterogeneous sensor type combinations / "combining" 1 channel
for this_group, these_picks in group_by.items():
this_ch_type = np.array(ch_types)[np.in1d(picks, these_picks)]
if len(set(this_ch_type)) > 1:
types = ', '.join(set(this_ch_type))
raise ValueError('Cannot combine sensors of different types; "{}" '
'contains types {}.'.format(this_group, types))
# now we know they're all the same type...
group_by[this_group] = dict(picks=these_picks, ch_type=this_ch_type[0],
title=title)
# are they trying to combine a single channel?
if len(these_picks) < 2 and combine_given:
warn('Only one channel in group "{}"; cannot combine by method '
'"{}".'.format(this_group, combine))
# check for compatible `fig` / `axes`; instantiate figs if needed; add
# fig(s) and axes into group_by
group_by = _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar,
clear=clear)
# prepare images in advance to get consistent vmin/vmax.
# At the same time, create a subsetted epochs object for each group
data = epochs.get_data()
vmin_vmax = {ch_type: dict(images=list(), norm=list())
for ch_type in set(ch_types)}
for this_group, this_group_dict in group_by.items():
these_picks = this_group_dict['picks']
this_ch_type = this_group_dict['ch_type']
this_ch_info = [epochs.info['chs'][n] for n in these_picks]
these_ch_names = np.array(epochs.info['ch_names'])[these_picks]
this_data = data[:, these_picks]
# create subsetted epochs object
this_info = create_info(sfreq=epochs.info['sfreq'],
ch_names=list(these_ch_names),
ch_types=[this_ch_type] * len(these_picks))
this_info['chs'] = this_ch_info
this_epochs = EpochsArray(this_data, this_info, tmin=epochs.times[0])
# apply scalings (only to image, not epochs object), combine channels
this_image = combine_func(this_data * scalings[this_ch_type])
# handle `order`. NB: this can potentially yield different orderings
# in each figure!
this_image, overlay_times = _order_epochs(this_image, epochs.times,
order, overlay_times)
this_norm = np.all(this_image > 0)
# apply smoothing
if sigma > 0.:
this_image = gaussian_filter1d(this_image, sigma=sigma, axis=0,
mode='nearest')
# update the group_by and vmin_vmax dicts
group_by[this_group].update(image=this_image, epochs=this_epochs,
norm=this_norm)
vmin_vmax[this_ch_type]['images'].append(this_image)
vmin_vmax[this_ch_type]['norm'].append(this_norm)
# compute overall vmin/vmax for images
for ch_type, this_vmin_vmax_dict in vmin_vmax.items():
image_list = this_vmin_vmax_dict['images']
image_stack = np.stack(image_list)
norm = all(this_vmin_vmax_dict['norm'])
vmin_vmax[ch_type] = _setup_vmin_vmax(image_stack, vmin, vmax, norm)
del image_stack, vmin, vmax
# prepare to plot
auto_ylims = {ch_type: [0., 0.] for ch_type in set(ch_types)}
# plot
for this_group, this_group_dict in group_by.items():
this_ch_type = this_group_dict['ch_type']
this_axes_dict = this_group_dict['axes']
vmin, vmax = vmin_vmax[this_ch_type]
# plot title
if this_group_dict['title'] is None:
title = _handle_default('titles').get(this_group, this_group)
if isinstance(combine, str) and len(title):
_comb = combine.upper() if combine == 'gfp' else combine
_comb = 'std. dev.' if _comb == 'std' else _comb
title += ' ({})'.format(_comb)
# plot the image
this_fig = _plot_epochs_image(
this_group_dict['image'], epochs=this_group_dict['epochs'],
picks=picks, colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
style_axes=True, norm=this_group_dict['norm'],
unit=units[this_ch_type], ax=this_axes_dict, show=False,
title=title, combine=combine, combine_given=combine_given,
overlay_times=overlay_times, evoked=evoked, ts_args=ts_args)
group_by[this_group].update(fig=this_fig)
# detect ylims across figures
if evoked and not manual_ylims:
# ensure get_ylim works properly
this_axes_dict['evoked'].figure.canvas.draw_idle()
this_bot, this_top = this_axes_dict['evoked'].get_ylim()
this_min = min(this_bot, this_top)
this_max = max(this_bot, this_top)
            curr_min, curr_max = auto_ylims[this_ch_type]
auto_ylims[this_ch_type] = [min(curr_min, this_min),
max(curr_max, this_max)]
# equalize ylims across figures (does not adjust ticks)
if evoked:
for this_group_dict in group_by.values():
ax = this_group_dict['axes']['evoked']
ch_type = this_group_dict['ch_type']
if not manual_ylims:
args = auto_ylims[ch_type]
func = max
if 'invert_y' in ts_args:
args = args[::-1]
func = min
ax.set_ylim(*args)
yticks = np.array(ax.get_yticks())
top_tick = func(yticks)
ax.spines['left'].set_bounds(top_tick, args[0])
plt_show(show)
# impose deterministic order of returned objects
return_order = np.array(sorted(group_by))
are_ch_types = np.in1d(return_order, _VALID_CHANNEL_TYPES)
if any(are_ch_types):
return_order = np.concatenate((return_order[are_ch_types],
return_order[~are_ch_types]))
return [group_by[group]['fig'] for group in return_order]
def _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar, clear=False):
"""Check user-provided fig/axes compatibility with plot_epochs_image."""
from matplotlib.pyplot import figure, Axes, subplot2grid
n_axes = 1 + int(evoked) + int(colorbar)
ax_names = ('image', 'evoked', 'colorbar')
ax_names = np.array(ax_names)[np.where([True, evoked, colorbar])]
prefix = 'Since evoked={} and colorbar={}, '.format(evoked, colorbar)
# got both fig and axes
if fig is not None and axes is not None:
raise ValueError('At least one of "fig" or "axes" must be None; got '
'fig={}, axes={}.'.format(fig, axes))
# got fig=None and axes=None: make fig(s) and axes
if fig is None and axes is None:
axes = dict()
colspan = 9 if colorbar else 10
rowspan = 2 if evoked else 3
shape = (3, 10)
for this_group in group_by:
this_fig = figure()
_set_window_title(this_fig, this_group)
kwargs = dict()
if check_version('matplotlib', '2.2'):
kwargs['fig'] = this_fig # unavailable on earlier mpl
subplot2grid(shape, (0, 0), colspan=colspan, rowspan=rowspan,
**kwargs)
if evoked:
subplot2grid(shape, (2, 0), colspan=colspan, rowspan=1,
**kwargs)
if colorbar:
subplot2grid(shape, (0, 9), colspan=1, rowspan=rowspan,
**kwargs)
axes[this_group] = this_fig.axes
# got a Figure instance
if fig is not None:
# If we're re-plotting into a fig made by a previous call to
# `plot_image`, be forgiving of presence/absence of sensor inset axis.
if len(fig.axes) not in (n_axes, n_axes + 1):
raise ValueError('{}"fig" must contain {} axes, got {}.'
''.format(prefix, n_axes, len(fig.axes)))
if len(list(group_by)) != 1:
raise ValueError('When "fig" is not None, "group_by" can only '
'have one group (got {}: {}).'
.format(len(group_by), ', '.join(group_by)))
key = list(group_by)[0]
if clear: # necessary if re-plotting into previous figure
_ = [ax.clear() for ax in fig.axes]
if len(fig.axes) > n_axes: # get rid of sensor inset
fig.axes[-1].remove()
_set_window_title(fig, key)
axes = {key: fig.axes}
# got an Axes instance, be forgiving (if evoked and colorbar are False)
if isinstance(axes, Axes):
axes = [axes]
# got an ndarray; be forgiving
if isinstance(axes, np.ndarray):
axes = axes.ravel().tolist()
# got a list of axes, make it a dict
if isinstance(axes, list):
if len(axes) != n_axes:
raise ValueError('{}"axes" must be length {}, got {}.'
''.format(prefix, n_axes, len(axes)))
# for list of axes to work, must be only one group
if len(list(group_by)) != 1:
raise ValueError('When axes is a list, can only plot one group '
'(got {} groups: {}).'
.format(len(group_by), ', '.join(group_by)))
key = list(group_by)[0]
axes = {key: axes}
# got a dict of lists of axes, make it dict of dicts
if isinstance(axes, dict):
# in theory a user could pass a dict of axes but *NOT* pass a group_by
# dict, but that is forbidden in the docstring so it shouldn't happen.
# The next test could fail in that case because we've constructed a
# group_by dict and the user won't have known what keys we chose.
if set(axes) != set(group_by):
raise ValueError('If "axes" is a dict its keys ({}) must match '
'the keys in "group_by" ({}).'
.format(list(axes), list(group_by)))
for this_group, this_axes_list in axes.items():
if len(this_axes_list) != n_axes:
raise ValueError('{}each value in "axes" must be a list of {} '
'axes, got {}.'.format(prefix, n_axes,
len(this_axes_list)))
# NB: next line assumes all axes in each list are in same figure
group_by[this_group]['fig'] = this_axes_list[0].get_figure()
group_by[this_group]['axes'] = {key: axis for key, axis in
zip(ax_names, this_axes_list)}
return group_by
def _order_epochs(data, times, order=None, overlay_times=None):
"""Sort epochs image data (2D). Helper for plot_epochs_image."""
n_epochs = len(data)
if overlay_times is not None:
if len(overlay_times) != n_epochs:
raise ValueError('size of overlay_times parameter ({}) does not '
'match the number of epochs ({}).'
.format(len(overlay_times), n_epochs))
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if ((times_min < times[0]) or (times_max > times[-1])):
warn('Some values in overlay_times fall outside of the epochs '
'time interval (between %s s and %s s)'
% (times[0], times[-1]))
if callable(order):
order = order(times, data)
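        # For reference, an illustrative callable that sorts epochs by mean
        # absolute amplitude (``data`` here is the 2D n_epochs x n_times
        # image):
        #     order = lambda t, data: np.argsort(np.abs(data).mean(axis=1))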
if order is not None:
if len(order) != n_epochs:
raise ValueError('If order is a {}, its length ({}) must match '
'the length of the data ({}).'
.format(type(order).__name__, len(order),
n_epochs))
order = np.asarray(order)
data = data[order]
if overlay_times is not None:
overlay_times = overlay_times[order]
return data, overlay_times
def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None,
vmin=None, vmax=None, colorbar=False, show=False,
unit=None, cmap=None, ax=None, overlay_times=None,
title=None, evoked=False, ts_args=None, combine=None,
combine_given=False, norm=False):
"""Plot epochs image. Helper function for plot_epochs_image."""
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
tmin = epochs.times[0]
tmax = epochs.times[-1]
ax_im = ax['image']
fig = ax_im.get_figure()
# draw the image
cmap = _setup_cmap(cmap, norm=norm)
n_epochs = len(image)
extent = [1e3 * tmin, 1e3 * tmax, 0, n_epochs]
im = ax_im.imshow(image, vmin=vmin, vmax=vmax, cmap=cmap[0], aspect='auto',
origin='lower', interpolation='nearest', extent=extent)
# optional things
if style_axes:
ax_im.set_title(title)
ax_im.set_ylabel('Epochs')
ax_im.axis('auto')
ax_im.axis('tight')
ax_im.axvline(0, color='k', linewidth=1, linestyle='--')
if overlay_times is not None:
ax_im.plot(1e3 * overlay_times, 0.5 + np.arange(n_epochs), 'k',
linewidth=2)
ax_im.set_xlim(1e3 * tmin, 1e3 * tmax)
# draw the evoked
if evoked:
from . import plot_compare_evokeds
pass_combine = (combine if combine_given else None)
_picks = [0] if len(picks) == 1 else None # prevent applying GFP
plot_compare_evokeds({'cond': list(epochs.iter_evoked(copy=False))},
picks=_picks, axes=ax['evoked'],
combine=pass_combine, **ts_args)
ax['evoked'].set_xlim(tmin, tmax) # don't multiply by 1e3 here
ax_im.set_xticks([])
# draw the colorbar
if colorbar:
from matplotlib.pyplot import colorbar as cbar
this_colorbar = cbar(im, cax=ax['colorbar'])
this_colorbar.ax.set_ylabel(unit, rotation=270, labelpad=12)
if cmap[1]:
ax_im.CB = DraggableColorbar(this_colorbar, im)
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
tight_layout(fig=fig)
# finish
plt_show(show)
return fig
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown subj',
color=(0.8, 0.8, 0.8), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs.
Parameters
----------
drop_log : list of list
Epoch drop log from Epochs.drop_log.
    threshold : float
        The percentage threshold of dropped epochs below which the drop log
        is not plotted. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
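    Examples
    --------
    A minimal illustration (``epochs`` is assumed to be an existing
    :class:`~mne.Epochs` instance with a populated drop log)::
        fig = plot_drop_log(epochs.drop_log, subject='sub-01')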
"""
import matplotlib.pyplot as plt
from ..epochs import _drop_log_stats
percent = _drop_log_stats(drop_log, ignore)
if percent < threshold:
logger.info('Percent dropped epochs < supplied threshold; not '
'plotting drop log.')
return
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
counts = np.array(list(scores.values()))
# init figure, handle easy case (no drops)
fig, ax = plt.subplots()
ax.set_title('{}: {:.1f}%'.format(subject, percent))
if len(ch_names) == 0:
ax.text(0.5, 0.5, 'No drops', ha='center', fontsize=14)
return fig
# count epochs that aren't fully caught by `ignore`
n_used = sum([any(ch not in ignore for ch in d) or len(d) == 0
for d in drop_log])
# calc plot values
n_bars = min(n_max_plot, len(ch_names))
x = np.arange(n_bars)
y = 100 * counts / n_used
order = np.flipud(np.argsort(y))
ax.bar(x, y[order[:n_bars]], color=color, width=width, align='center')
ax.set_xticks(x)
ax.set_xticklabels(ch_names[order[:n_bars]], rotation=45, size=10,
horizontalalignment='right')
ax.set_ylabel('% of epochs rejected')
ax.grid(axis='y')
tight_layout(pad=1, fig=fig)
plt_show(show)
return fig
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
title_str, axes_handler):
"""Handle drawing epochs axes."""
this = axes_handler[0]
for ii, data_, ax in zip(epoch_idx, data, axes):
for line, d in zip(ax.lines, data_[good_ch_idx]):
line.set_data(times, d)
if bad_ch_idx is not None:
bad_lines = [ax.lines[k] for k in bad_ch_idx]
for line, d in zip(bad_lines, data_[bad_ch_idx]):
line.set_data(times, d)
if title_str is not None:
ax.set_title(title_str % ii, fontsize=12)
ax.set_ylim(data.min(), data.max())
ax.set_yticks(list())
ax.set_xticks(list())
if vars(ax)[this]['reject'] is True:
            # epoch marked as rejected in this view; color traces gray
for line in ax.lines:
line.set_color((0.8, 0.8, 0.8))
ax.get_figure().canvas.draw()
else:
            # not rejected in this view; undo coloring from other views
for k in axes_handler:
if k == this:
continue
if vars(ax).get(k, {}).get('reject', None) is True:
for line in ax.lines[:len(good_ch_idx)]:
line.set_color('k')
if bad_ch_idx is not None:
for line in ax.lines[-len(bad_ch_idx):]:
line.set_color('r')
ax.get_figure().canvas.draw()
break
def _epochs_navigation_onclick(event, params):
"""Handle epochs navigation click."""
import matplotlib.pyplot as plt
p = params
here = None
if event.inaxes == p['back'].ax:
here = 1
elif event.inaxes == p['next'].ax:
here = -1
elif event.inaxes == p['reject-quit'].ax:
if p['reject_idx']:
p['epochs'].drop(p['reject_idx'])
plt.close(p['fig'])
plt.close(event.inaxes.get_figure())
if here is not None:
p['idx_handler'].rotate(here)
p['axes_handler'].rotate(here)
this_idx = p['idx_handler'][0]
_draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
p['data'][this_idx],
p['times'], p['axes'], p['title_str'],
p['axes_handler'])
# XXX don't ask me why
p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
"""Handle epochs axes click."""
reject_color = (0.8, 0.8, 0.8)
ax = event.inaxes
if event.inaxes is None:
return
p = params
here = vars(ax)[p['axes_handler'][0]]
if here.get('reject', None) is False:
idx = here['idx']
if idx not in p['reject_idx']:
p['reject_idx'].append(idx)
for line in ax.lines:
line.set_color(reject_color)
here['reject'] = True
elif here.get('reject', None) is True:
idx = here['idx']
if idx in p['reject_idx']:
p['reject_idx'].pop(p['reject_idx'].index(idx))
good_lines = [ax.lines[k] for k in p['good_ch_idx']]
for line in good_lines:
line.set_color('k')
if p['bad_ch_idx'] is not None:
bad_lines = ax.lines[-len(p['bad_ch_idx']):]
for line in bad_lines:
line.set_color('r')
here['reject'] = False
ax.get_figure().canvas.draw()
@fill_doc
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_colors=None, order=None,
show=True, block=False, decim='auto', noise_cov=None,
butterfly=False, show_scrollbars=True, epoch_colors=None,
event_id=None):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
The epochs object.
%(picks_good_data)s
scalings : dict | 'auto' | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded,
        a subset of epochs up to 100 MB will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4,
whitened=10.)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
events : None, array, shape (n_events, 3)
Events to show with vertical bars. If events are provided, the epoch
numbers are not shown to prevent overlap. You can toggle epoch
numbering through options (press 'o' key). You can use
`~mne.viz.plot_events` as a legend for the colors. By default, the
coloring scheme is the same.
.. warning:: If the epochs have been resampled, the events no longer
align with the data.
.. versionadded:: 0.14.0
event_colors : None, dict
        Dictionary of event_id value and its associated color. If None,
        colors are automatically drawn from a default list (cycled through if
        the number of event types exceeds the number of default colors). Uses
        the same coloring scheme as `~mne.viz.plot_events`.
.. versionadded:: 0.14.0
order : array of str | None
Order in which to plot channel types.
.. versionadded:: 0.18.0
show : bool
Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
the decimation that results in a sampling rate at least three times
larger than ``info['lowpass']`` (e.g., a 40 Hz lowpass will result in
at least a 120 Hz displayed sample rate).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
        For data processed with SSS, the effective dependence between
        magnetometers and gradiometers may introduce differences in scaling;
        consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
butterfly : bool
Whether to directly call the butterfly view.
.. versionadded:: 0.18.0
%(show_scrollbars)s
    epoch_colors : list of (n_epochs) list (of n_channels) | None
        Colors to use for individual epochs: one list of n_channels colors per
        epoch (n_epochs lists in total). If None, use default colors.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and associated event
integers as values. Useful when ``events`` contains event numbers not
present in ``epochs.event_id`` (e.g., because of event subselection).
Values in ``event_id`` will take precedence over those in
``epochs.event_id`` when there are overlapping keys.
.. versionadded:: 0.20
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
Notes
-----
    The arrow keys (up/down/left/right) can be used to navigate between
    channels and epochs, and the scaling can be adjusted with the - and + (or
    =) keys, but this depends on the backend matplotlib is configured to use
    (e.g., ``mpl.use('TkAgg')`` should work). Full-screen mode can be toggled
    with the F11 key. The number of epochs and channels per view can be
    adjusted with the home/end and page down/page up keys, or through the
    options dialog (press the ``o`` key). The ``h`` key plots a histogram of
    peak-to-peak values along with the rejection thresholds used. The
    butterfly plot can be toggled with the ``b`` key. A right mouse click adds
    a vertical line to the plot. Click the 'help' button at the bottom left
    corner of the plotter to view all the options.
.. versionadded:: 0.10.0
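    Examples
    --------
    A minimal interactive call might look like this (``epochs`` is assumed to
    exist; the viewport sizes are illustrative)::
        fig = plot_epochs(epochs, n_epochs=10, n_channels=15, block=True)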
"""
epochs.drop_bad()
scalings = _compute_scalings(scalings, epochs)
scalings = _handle_default('scalings_plot_raw', scalings)
decim, data_picks = _handle_decim(epochs.info.copy(), decim, None)
projs = epochs.info['projs']
noise_cov = _check_cov(noise_cov, epochs.info)
params = dict(epochs=epochs, info=epochs.info.copy(), t_start=0,
bad_color=(0.8, 0.8, 0.8), histogram=None, decim=decim,
data_picks=data_picks, noise_cov=noise_cov,
use_noise_cov=noise_cov is not None,
show_scrollbars=show_scrollbars,
epoch_colors=epoch_colors)
params['label_click_fun'] = partial(_pick_bad_channels, params=params)
_prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks, events=events, order=order,
event_colors=event_colors, butterfly=butterfly,
event_id=event_id)
_prepare_projectors(params)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
xscale='linear', area_mode='std', area_alpha=0.33,
dB=True, estimate='auto', show=True, n_jobs=1,
average=False, line_alpha=None, spatial_colors=True,
sphere=None, verbose=None):
"""%(plot_psd_doc)s.
Parameters
----------
epochs : instance of Epochs
The epochs object.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
        The bandwidth of the multitaper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
%(plot_psd_picks_good_data)s
ax : instance of Axes | None
Axes to plot into. If None, axes will be created.
%(plot_psd_color)s
%(plot_psd_xscale)s
%(plot_psd_area_mode)s
%(plot_psd_area_alpha)s
%(plot_psd_dB)s
%(plot_psd_estimate)s
%(show)s
%(n_jobs)s
%(plot_psd_average)s
%(plot_psd_line_alpha)s
%(plot_psd_spatial_colors)s
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of Figure
Figure with frequency spectra of the data channels.
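    Examples
    --------
    A minimal illustration (``epochs`` is assumed to exist; the frequency band
    is chosen arbitrarily)::
        fig = plot_epochs_psd(epochs, fmin=1., fmax=40., average=True)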
"""
from .utils import _set_psd_plot_params, _plot_psd
fig, picks_list, titles_list, units_list, scalings_list, ax_list, \
make_label, xlabels_list = \
_set_psd_plot_params(epochs.info, proj, picks, ax, area_mode)
_check_psd_fmax(epochs, fmax)
del ax
psd_list = list()
for picks in picks_list:
        # Multitaper is used for epochs instead of Welch because Welch chunks
        # the data, whereas epoched data are by nature already chunked.
psd, freqs = psd_multitaper(epochs, picks=picks, fmin=fmin,
fmax=fmax, tmin=tmin, tmax=tmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, proj=proj,
n_jobs=n_jobs)
psd_list.append(np.mean(psd, axis=0))
fig = _plot_psd(epochs, fig, freqs, psd_list, picks_list, titles_list,
units_list, scalings_list, ax_list, make_label, color,
area_mode, area_alpha, dB, estimate, average,
spatial_colors, xscale, line_alpha, sphere, xlabels_list)
plt_show(show)
return fig
def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks, events=None, event_colors=None,
order=None, butterfly=False, info=None,
event_id=None):
"""Set up the mne_browse_epochs window."""
import matplotlib as mpl
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
epochs = params['epochs']
info = info or epochs.info
orig_epoch_times, epochs_events = epochs.times, epochs.events
name = epochs._name
del epochs
# Reorganize channels
picks = _picks_to_idx(info, picks)
picks = sorted(picks)
# channel type string for every channel
types = _get_channel_types(info, picks)
# list of unique channel types
unique_types = _get_channel_types(info, unique=True)
if order is None:
order = _DATA_CH_TYPES_ORDER_DEFAULT
inds = [pick_idx for order_type in order
for pick_idx, ch_type in zip(picks, types)
if order_type == ch_type]
if len(unique_types) > len(order):
ch_missing = unique_types - set(order)
raise RuntimeError('%s are in picks but not in order.'
' Please specify all channel types picked.' %
(str(ch_missing)))
types = sorted(types, key=order.index)
    if len(inds) != len(picks):
        raise RuntimeError('Some channels were not classified. Please '
                           'check your picks.')
ch_names = [params['info']['ch_names'][idx] for idx in inds]
_validate_type(params['epoch_colors'], (list, None), 'epoch_colors')
if params['epoch_colors'] is not None:
if len(params['epoch_colors']) != len(params['epochs'].events):
raise ValueError('epoch_colors must be list of len(epochs.events).'
' Got %s' % len(params['epoch_colors']))
for epoch_idx in range(len(params['epoch_colors'])):
these_colors = params['epoch_colors'][epoch_idx]
_validate_type(these_colors, list,
'epoch_colors[%s]' % (epoch_idx,))
if len(these_colors) != len(params['epochs'].ch_names):
raise ValueError('epoch_colors for the %dth epoch '
'has length %d, expected %d.'
% (epoch_idx, len(these_colors),
len(params['epochs'].ch_names)))
params['epoch_colors'][epoch_idx] = \
[these_colors[idx] for idx in inds]
# set up plotting
n_epochs = min(n_epochs, len(epochs_events))
duration = len(orig_epoch_times) * n_epochs
n_channels = min(n_channels, len(picks))
if title is None:
title = name
if title is None or len(title) == 0:
title = ''
color = _handle_default('color', None)
figsize = _get_figsize_from_config()
params['fig'] = figure_nobar(facecolor='w', figsize=figsize, dpi=80)
_set_window_title(params['fig'], title or 'Epochs')
_prepare_mne_browse(params, xlabel='Epochs')
ax = params['ax']
ax_hscroll = params['ax_hscroll']
ax_vscroll = params['ax_vscroll']
# add secondary x axis for annotations / event labels
ax2 = ax.twiny()
ax2.set_zorder(-1)
ax2.set_axes_locator(ax.get_axes_locator())
# set axis lims
ax.axis([0, duration, 0, 200])
ax2.axis([0, duration, 0, 200])
# populate vertical and horizontal scrollbars
ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
facecolor='w', zorder=3))
for ci in range(len(picks)):
if ch_names[ci] in params['info']['bads']:
this_color = params['bad_color']
else:
this_color = color[types[ci]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
facecolor=this_color,
edgecolor=this_color,
zorder=4))
vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
edgecolor='w', facecolor='w', zorder=5)
ax_vscroll.add_patch(vsel_patch)
ax_vscroll.set_ylim(len(types), 0)
ax_vscroll.set_title('Ch.')
# populate colors list
type_colors = [colorConverter.to_rgba(color[c]) for c in types]
colors = list()
for color_idx in range(len(type_colors)):
colors.append([type_colors[color_idx]] * len(epochs_events))
lines = list()
n_times = len(orig_epoch_times)
for ch_idx in range(n_channels):
if len(colors) - 1 < ch_idx:
break
lc = LineCollection(list(), antialiased=True, linewidths=0.5,
zorder=3, picker=True)
lc.set_pickradius(3.)
ax.add_collection(lc)
lines.append(lc)
data = np.zeros((params['info']['nchan'],
len(orig_epoch_times) * n_epochs))
ylim = (25., 0.) # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
# make shells for plotting traces
offset = ylim[0] / n_channels
offsets = np.arange(n_channels) * offset + (offset / 2.)
times = np.arange(len(orig_epoch_times) * len(epochs_events))
epoch_times = np.arange(0, len(times), n_times)
ax.set_yticks(offsets)
ax.set_ylim(ylim)
ticks = epoch_times + 0.5 * n_times
for ax_ in (ax, ax2):
ax_.set_xticks(ticks[:n_epochs])
labels = list(range(0, len(ticks))) # epoch numbers
ax.set_xticklabels(labels[:n_epochs])
xlim = epoch_times[-1] + len(orig_epoch_times)
ax_hscroll.set_xlim(0, xlim)
vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')
# fit horizontal scroll bar ticks
hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
hticks = list()
for tick in hscroll_ticks:
hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
hlabels = [x // n_times for x in hticks]
ax_hscroll.set_xticks(hticks)
ax_hscroll.set_xticklabels(hlabels)
for epoch_idx in range(len(epoch_times)):
ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
n_times, 1, facecolor='w',
edgecolor='w', alpha=0.6))
hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
edgecolor='k',
facecolor=(0.75, 0.75, 0.75),
alpha=0.25, linewidth=1, clip_on=False)
ax_hscroll.add_patch(hsel_patch)
text = ax.text(0, 0, 'blank', zorder=3, verticalalignment='baseline',
ha='left', fontweight='bold')
text.set_visible(False)
epoch_nr = True
if events is not None:
event_set = set(events[:, 2])
ev_id = params['epochs'].event_id if event_id is None else event_id
event_colors = _handle_event_colors(event_colors, event_set, ev_id)
epoch_nr = False # epoch number off by default to avoid overlap
for label in ax.xaxis.get_ticklabels():
label.set_visible(False)
params.update({'ax': ax,
'ax2': ax2,
'ax_hscroll': ax_hscroll,
'ax_vscroll': ax_vscroll,
'vsel_patch': vsel_patch,
'hsel_patch': hsel_patch,
'lines': lines, # vertical lines for segmentation
'projs': projs,
'ch_names': ch_names,
'n_channels': n_channels,
'n_epochs': n_epochs,
'scalings': scalings,
'duration': duration,
'ch_start': 0,
'colors': colors,
'def_colors': type_colors, # don't change at runtime
'picks': picks,
'bads': np.array(list(), dtype=int),
'data': data,
'times': times,
'epoch_times': epoch_times,
'offsets': offsets,
'labels': labels,
'scale_factor': 1.0,
'butterfly_scale': 1.0,
'fig_proj': None,
'types': np.array(types),
'inds': inds,
'vert_lines': list(),
'vertline_t': vertline_t,
'butterfly': butterfly,
'text': text,
'fig_options': None,
'settings': [True, True, epoch_nr, True],
'image_plot': None,
'events': events,
'event_colors': event_colors,
'ev_lines': list(),
'ev_texts': list(),
'ann': list(), # list for butterfly view annotations
'order': order,
'ch_types': unique_types})
params['plot_fun'] = partial(_plot_traces, params=params)
# Plot epoch_colors
if params['epoch_colors'] is not None:
for epoch_idx, epoch_color in enumerate(params['epoch_colors']):
for ch_idx in range(len(params['ch_names'])):
if epoch_color[ch_idx] is not None:
params['colors'][ch_idx][epoch_idx] = \
colorConverter.to_rgba(epoch_color[ch_idx])
            # plot on horizontal patch if all colors are the same
if epoch_color.count(epoch_color[0]) == len(epoch_color):
params['ax_hscroll'].patches[epoch_idx].set_color(
epoch_color[0])
params['ax_hscroll'].patches[epoch_idx].set_zorder(3)
params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
# callbacks
callback_scroll = partial(_plot_onscroll, params=params)
params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
callback_click = partial(_mouse_click, params=params)
params['fig'].canvas.mpl_connect('button_press_event', callback_click)
callback_key = partial(_plot_onkey, params=params)
params['fig'].canvas.mpl_connect('key_press_event', callback_key)
params['fig'].canvas.mpl_connect('pick_event', partial(_onpick,
params=params))
params['callback_key'] = callback_key
# Draw event lines for the first time.
_plot_vert_lines(params)
def _prepare_projectors(params):
"""Set up the projectors for epochs browser."""
import matplotlib as mpl
epochs = params['epochs']
projs = params['projs']
if len(projs) > 0 and not epochs.proj:
# set up proj button
ax_button = params['fig'].add_axes(params['proj_button_pos'])
ax_button.set_axes_locator(params['proj_button_locator'])
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
params['opt_button'] = opt_button
params['apply_proj'] = epochs.proj
    # Because this code is shared with plot_evoked, some extra steps follow:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_epochs_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
callback_proj('none')
def _plot_traces(params):
"""Plot concatenated epochs."""
params['text'].set_visible(False)
ax = params['ax']
butterfly = params['butterfly']
offsets = params['offsets']
lines = params['lines']
epochs = params['epochs']
if butterfly:
ch_start = 0
n_channels = len(params['picks'])
data = params['data'] * params['butterfly_scale']
_prepare_butterfly(params)
else:
ch_start = params['ch_start']
n_channels = params['n_channels']
data = params['data'] * params['scale_factor']
n_times = len(epochs.times)
tick_list = list()
start_idx = params['t_start'] // n_times
end = params['t_start'] + params['duration']
end_idx = end // n_times
xlabels = params['labels'][start_idx:end_idx]
event_ids = params['epochs'].events[start_idx:end_idx, 2]
params['ax2'].set_xticklabels(event_ids)
ax.set_xticklabels(xlabels)
del event_ids, xlabels
ylabels = ax.yaxis.get_ticklabels()
# do the plotting
for line_idx in range(n_channels):
ch_idx = line_idx + ch_start
if line_idx >= len(lines):
break
elif ch_idx < len(params['ch_names']):
if butterfly:
# determine offsets for signal traces
ch_type = params['types'][ch_idx]
chan_types_split = sorted(set(params['ch_types']) &
set(_DATA_CH_TYPES_SPLIT),
key=params['order'].index)
ylim = ax.get_ylim()[0]
ticks = np.arange(
0, ylim, ylim / (4 * max(len(chan_types_split), 1)))
offset_pos = np.arange(2, len(chan_types_split) * 4, 4)
if ch_type in chan_types_split:
offset = ticks[offset_pos[chan_types_split.index(ch_type)]]
else:
lines[line_idx].set_segments(list())
offset = None
else:
tick_list += [params['ch_names'][ch_idx]]
offset = offsets[line_idx]
if offset is None:
continue
if params['inds'][ch_idx] in params['data_picks']:
this_decim = params['decim']
else:
this_decim = 1
this_data = data[ch_idx]
# subtraction here gets correct orientation for flipped ylim
ydata = offset - this_data
xdata = params['times'][:params['duration']]
num_epochs = np.min([params['n_epochs'], len(epochs.events)])
segments = np.split(np.array((xdata, ydata)).T, num_epochs)
segments = [segment[::this_decim] for segment in segments]
ch_name = params['ch_names'][ch_idx]
if ch_name in params['info']['bads']:
if not butterfly:
this_color = params['bad_color']
ylabels[line_idx].set_color(this_color)
this_color = np.tile((params['bad_color']), (num_epochs, 1))
for bad_idx in params['bads']:
if bad_idx < start_idx or bad_idx >= end_idx:
continue
this_color[bad_idx - start_idx] = (1., 0., 0.)
lines[line_idx].set_zorder(2)
else:
this_color = params['colors'][ch_idx][start_idx:end_idx]
lines[line_idx].set_zorder(3)
if not butterfly:
ylabels[line_idx].set_color('black')
lines[line_idx].set_segments(segments)
lines[line_idx].set_color(this_color)
else:
lines[line_idx].set_segments(list())
# finalize plot
ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
False)
params['ax2'].set_xlim(params['times'][0],
params['times'][0] + params['duration'], False)
if butterfly:
# compute labels for ticks surrounding the trace offset
factor = -1. / params['butterfly_scale']
scalings_default = _handle_default('scalings')
chan_types_split = sorted(set(params['types']) &
set(_DATA_CH_TYPES_SPLIT),
key=params['order'].index)
ylim = ax.get_ylim()[0]
ticks = np.arange(
0, ylim + 1, ylim / (4 * max(len(chan_types_split), 1)))
offset_pos = np.arange(2, (len(chan_types_split) * 4) + 1, 4)
ax.set_yticks(ticks)
labels = [''] * len(ticks)
labels = [0 if idx in range(2, len(labels), 4) else label
for idx, label in enumerate(labels)]
for idx_chan, chan_type in enumerate(chan_types_split):
tick_top, tick_bottom = 1 + idx_chan * 4, 3 + idx_chan * 4
offset = ticks[offset_pos[idx_chan]]
for tick_pos in [tick_top, tick_bottom]:
tickoffset_diff = ticks[tick_pos] - offset
labels[tick_pos] = (tickoffset_diff *
params['scalings'][chan_type] *
factor * scalings_default[chan_type])
# Heuristic to turn floats to ints where possible (e.g. -500.0 to -500)
for li, label in enumerate(labels):
if isinstance(label, float) and float(str(label)) != round(label):
labels[li] = round(label, 2)
ax.set_yticklabels(labels, fontsize=12, color='black')
else:
ax.set_yticks(params['offsets'][:len(tick_list)])
ax.set_yticklabels(tick_list, fontsize=12)
_set_ax_label_style(ax, params)
if params['events'] is not None: # vertical lines for events.
_ = _draw_event_lines(params)
params['vsel_patch'].set_y(ch_start)
params['fig'].canvas.draw()
# XXX This is a hack to make sure this figure gets drawn last
# so that when matplotlib goes to calculate bounds we don't get a
# CGContextRef error on the MacOSX backend :(
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _plot_update_epochs_proj(params, bools=None):
"""Deal with proj changed."""
if bools is not None:
inds = np.where(bools)[0]
params['info']['projs'] = [deepcopy(params['projs'][ii])
for ii in inds]
params['proj_bools'] = bools
epochs = params['epochs']
n_epochs = params['n_epochs']
params['projector'], params['whitened_ch_names'] = _setup_plot_projector(
params['info'], params['noise_cov'], True, params['use_noise_cov'])
start = params['t_start'] // len(epochs.times)
end = start + n_epochs
if epochs.preload:
data = np.concatenate(epochs.get_data()[start:end], axis=1)
else:
# this is faster than epochs.get_data()[start:end] when not preloaded
data = np.concatenate(epochs[start:end].get_data(), axis=1)
if params['projector'] is not None:
data = np.dot(params['projector'], data)
types = params['types']
for pick, ind in enumerate(params['inds']):
ch_name = params['info']['ch_names'][ind]
if ch_name in params['whitened_ch_names'] and \
ch_name not in params['info']['bads']:
norm = params['scalings']['whitened']
else:
norm = params['scalings'][types[pick]]
params['data'][pick] = data[ind] / norm
params['plot_fun']()
def _handle_picks(epochs):
"""Handle picks."""
if any('ICA' in k for k in epochs.ch_names):
picks = pick_types(epochs.info, meg=True, misc=True, ref_meg=False,
exclude=[])
else:
picks = pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
seeg=True, ecog=True, ref_meg=False, fnirs=True,
exclude=[])
return picks
def _plot_window(value, params):
"""Deal with horizontal shift of the viewport."""
max_times = len(params['times']) - params['duration']
value = int(round(value))
if value > max_times:
value = len(params['times']) - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
params['plot_update_proj_callback'](params)
def _plot_vert_lines(params):
"""Plot vertical lines."""
ax = params['ax']
while len(ax.lines) > 0:
ax.lines.pop()
params['vert_lines'] = list()
params['ev_lines'] = list()
params['vertline_t'].set_text('')
epochs = params['epochs']
# draw event lines
tzero_already_drawn = False
if params['events'] is not None:
tzero_already_drawn = _draw_event_lines(params)
# draw zero lines
if params['settings'][3] and not tzero_already_drawn:
t_zero = np.where(epochs.times == 0.)[0]
if len(t_zero) == 1: # not True if tmin > 0
for event_idx in range(len(epochs.events)):
pos = [event_idx * len(epochs.times) + t_zero[0],
event_idx * len(epochs.times) + t_zero[0]]
ax.plot(pos, ax.get_ylim(), 'g', zorder=0, alpha=0.4)
# draw boundaries between epochs
for epoch_idx in range(len(epochs.events)):
pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=2)
def _pick_bad_epochs(event, params):
"""Select / drop bad epochs."""
if 'ica' in params:
pos = (event.xdata, event.ydata)
_pick_bad_channels(pos, params)
return
n_times = len(params['epochs'].times)
start_idx = int(params['t_start'] / n_times)
xdata = event.xdata
xlim = event.inaxes.get_xlim()
epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
total_epochs = len(params['epochs'].events)
if epoch_idx > total_epochs - 1:
return
# remove bad epoch
if epoch_idx in params['bads']:
params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
params['ax_hscroll'].patches[epoch_idx].set_color('w')
params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
params['plot_fun']()
return
# add bad epoch
params['bads'] = np.append(params['bads'], epoch_idx)
params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
params['ax_hscroll'].patches[epoch_idx].set_zorder(3)
params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
params['plot_fun']()
def _pick_bad_channels(pos, params):
"""Select bad channels."""
text, ch_idx = _label2idx(params, pos)
if text is None:
return
if text in params['info']['bads']:
while text in params['info']['bads']:
params['info']['bads'].remove(text)
color = params['def_colors'][ch_idx]
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
else:
params['info']['bads'].append(text)
color = params['bad_color']
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
if 'ica' in params:
params['plot_fun']()
else:
params['plot_update_proj_callback'](params)
def _plot_onscroll(event, params):
"""Handle scroll events."""
if event.key == 'control':
if event.step < 0:
event.key = '-'
else:
event.key = '+'
_plot_onkey(event, params)
return
if params['butterfly']:
return
_plot_raw_onscroll(event, params, len(params['ch_names']))
def _mouse_click(event, params):
"""Handle mouse click events."""
from matplotlib.pyplot import fignum_exists
if event.inaxes is None:
if params['butterfly'] or not params['settings'][0]:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
return
if event.button == 1: # left click
params['label_click_fun'](pos)
elif event.button == 3: # right click
if 'ica' not in params:
_, ch_idx = _label2idx(params, pos)
if ch_idx is None:
return
if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
'eeg', 'eog']:
logger.info('Event related fields / potentials only '
'available for MEG and EEG channels.')
return
# check if the figure was already closed
if (params['image_plot'] is not None and
not fignum_exists(params['image_plot'].number)):
params['image_plot'] = None
fig = plot_epochs_image(params['epochs'],
picks=params['inds'][ch_idx],
fig=params['image_plot'],
clear=True)[0]
params['image_plot'] = fig
elif event.button == 1: # left click
# vertical scroll bar changed
if event.inaxes == params['ax_vscroll']:
if params['butterfly']:
return
# Don't let scrollbar go outside vertical scrollbar limits
# XXX: floating point exception on some machines if this happens.
ch_start = min(
max(int(event.ydata) - params['n_channels'] // 2, 0),
len(params['ch_names']) - params['n_channels'])
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scroll bar changed
elif event.inaxes == params['ax_hscroll']:
# find the closest epoch time
times = params['epoch_times']
offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
_plot_window(xdata, params)
# main axes
elif event.inaxes == params['ax']:
_pick_bad_epochs(event, params)
elif event.inaxes == params['ax'] and event.button == 2: # middle click
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
elif event.inaxes == params['ax'] and event.button == 3: # right click
n_times = len(params['epochs'].times)
xdata = int(event.xdata % n_times)
prev_xdata = 0
if len(params['vert_lines']) > 0:
prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
while len(params['vert_lines']) > 0:
params['ax'].lines.remove(params['vert_lines'][0][0])
params['vert_lines'].pop(0)
if prev_xdata == xdata: # lines removed
params['vertline_t'].set_text('')
params['plot_fun']()
return
ylim = params['ax'].get_ylim()
for epoch_idx in range(params['n_epochs']): # plot lines
pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
zorder=5))
params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
params['plot_fun']()
def _plot_onkey(event, params):
"""Handle key presses."""
import matplotlib.pyplot as plt
if event.key == 'down':
if params['butterfly']:
return
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'up':
if params['butterfly']:
return
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'left':
sample = params['t_start'] - params['duration']
sample = np.max([0, sample])
_plot_window(sample, params)
elif event.key == 'right':
sample = params['t_start'] + params['duration']
sample = np.min([sample, params['times'][-1] - params['duration']])
times = params['epoch_times']
xdata = times.flat[np.abs(times - sample).argmin()]
_plot_window(xdata, params)
elif event.key == '-':
if params['butterfly']:
params['butterfly_scale'] /= 1.1
else:
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key in ['+', '=']:
if params['butterfly']:
params['butterfly_scale'] *= 1.1
else:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
elif event.key == 'pagedown':
if params['n_channels'] == 1 or params['butterfly']:
return
n_channels = params['n_channels'] - 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].collections.pop()
params['ax'].set_yticks(params['offsets'])
params['lines'].pop()
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'pageup':
if params['butterfly']:
return
from matplotlib.collections import LineCollection
n_channels = params['n_channels'] + 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
lc = LineCollection(list(), antialiased=True, linewidths=0.5,
zorder=3, picker=True)
lc.set_pickradius(3.)
params['ax'].add_collection(lc)
params['ax'].set_yticks(params['offsets'])
params['lines'].append(lc)
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'home':
n_epochs = params['n_epochs'] - 1
if n_epochs <= 0:
return
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
for key in ('ax', 'ax2'):
params[key].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] -= n_times
params['hsel_patch'].set_width(params['duration'])
params['data'] = params['data'][:, :-n_times]
params['plot_update_proj_callback'](params)
elif event.key == 'end':
n_epochs = params['n_epochs'] + 1
n_times = len(params['epochs'].times)
if n_times * n_epochs > len(params['times']):
return
ticks = params['epoch_times'] + 0.5 * n_times
for key in ('ax', 'ax2'):
params[key].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
if len(params['vert_lines']) > 0:
ax = params['ax']
pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
zorder=4))
params['duration'] += n_times
if params['t_start'] + params['duration'] > len(params['times']):
params['t_start'] -= n_times
params['hsel_patch'].set_x(params['t_start'])
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
params['plot_update_proj_callback'](params)
elif event.key == 'b':
params['butterfly'] = not params['butterfly']
if params['fig_options'] is not None:
plt.close(params['fig_options'])
params['fig_options'] = None
_prepare_butterfly(params)
params['plot_fun']()
elif event.key == 'w':
params['use_noise_cov'] = not params['use_noise_cov']
_plot_update_epochs_proj(params)
_plot_traces(params)
elif event.key == 'o':
if not params['butterfly']:
_open_options(params)
elif event.key == 'h':
_plot_histogram(params)
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'escape':
plt.close(params['fig'])
elif event.key == 'z':
# zen mode: remove scrollbars and buttons
_toggle_scrollbars(params)
def _prepare_butterfly(params):
"""Set up butterfly plot."""
from matplotlib.collections import LineCollection
import matplotlib as mpl
if params['butterfly']:
units = _handle_default('units')
chan_types = sorted(set(params['types']) & set(params['order']),
key=params['order'].index)
if len(chan_types) < 1:
return
params['ax_vscroll'].set_visible(False)
ax = params['ax']
labels = ax.yaxis.get_ticklabels()
for label in labels:
label.set_visible(True)
offsets = np.arange(0, ax.get_ylim()[0],
ax.get_ylim()[0] / (4 * len(chan_types)))
ticks = offsets
ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
ax.set_yticks(ticks)
used_types = 0
params['offsets'] = [ticks[2]]
# checking which annotations are displayed and removing them
ann = params['ann']
annotations = [child for child in params['ax2'].get_children()
if isinstance(child, mpl.text.Annotation)]
for annote in annotations:
annote.remove()
ann[:] = list()
assert len(params['ann']) == 0
titles = _handle_default('titles')
for chan_type in chan_types:
unit = units[chan_type]
pos = (0, 1 - (ticks[2 + 4 * used_types] / ax.get_ylim()[0]))
ann.append(params['ax2'].annotate(
'%s (%s)' % (titles[chan_type], unit), xy=pos,
xytext=(-70, 0), ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points'))
used_types += 1
while len(params['lines']) < len(params['picks']):
lc = LineCollection(list(), antialiased=True, linewidths=.5,
zorder=3, picker=True)
lc.set_pickradius(3.)
ax.add_collection(lc)
params['lines'].append(lc)
else: # change back to default view
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
params['ax_vscroll'].set_visible(True)
while len(params['ax2'].texts) > 0:
params['ax2'].texts.pop()
n_channels = params['n_channels']
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
ylim = (25., 0.)
params['ax'].set_ylim(ylim)
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['ax'].set_yticks(params['offsets'])
def _onpick(event, params):
"""Add a channel name on click."""
if event.mouseevent.button != 2 or not params['butterfly']:
return # text label added with a middle mouse button
lidx = np.where([line is event.artist for line in params['lines']])[0][0]
text = params['text']
text.set_x(event.mouseevent.xdata)
text.set_y(event.mouseevent.ydata)
text.set_text(params['ch_names'][lidx])
text.set_visible(True)
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use _mouse_click (happens once per click)
# to do the drawing
def _close_event(event, params):
"""Drop selected bad epochs (called on closing of the plot)."""
params['epochs'].drop(params['bads'])
params['epochs'].info['bads'] = params['info']['bads']
logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
def _update_channels_epochs(event, params):
"""Change the amount of channels and epochs per view."""
from matplotlib.collections import LineCollection
# Channels
n_channels = int(np.around(params['channel_slider'].val))
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
while len(params['lines']) < n_channels:
lc = LineCollection(list(), linewidths=0.5, antialiased=True,
zorder=3, picker=True)
lc.set_pickradius(3.)
params['ax'].add_collection(lc)
params['lines'].append(lc)
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
params['n_channels'] = n_channels
# Epochs
n_epochs = int(np.around(params['epoch_slider'].val))
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
for key in ('ax', 'ax2'):
params[key].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] = n_times * n_epochs
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
if params['t_start'] + n_times * n_epochs > len(params['times']):
params['t_start'] = len(params['times']) - n_times * n_epochs
params['hsel_patch'].set_x(params['t_start'])
params['plot_update_proj_callback'](params)
def _toggle_labels(label, params):
"""Toggle axis labels."""
if label == 'Channel names visible':
params['settings'][0] = not params['settings'][0]
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
elif label == 'Event-id visible':
params['settings'][1] = not params['settings'][1]
labels = params['ax2'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][1])
elif label == 'Epoch-id visible':
params['settings'][2] = not params['settings'][2]
labels = params['ax'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][2])
elif label == 'Zeroline visible':
params['settings'][3] = not params['settings'][3]
_plot_vert_lines(params)
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _open_options(params):
"""Open the option window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
if params['fig_options'] is not None:
# turn off options dialog
plt.close(params['fig_options'])
params['fig_options'] = None
return
width = 10
height = 3
fig_options = figure_nobar(figsize=(width, height), dpi=80)
_set_window_title(fig_options, 'View settings')
params['fig_options'] = fig_options
ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
plt.axis('off')
params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
len(params['ch_names']),
valfmt='%0.0f',
valinit=params['n_channels'])
params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
len(params['epoch_times']),
valfmt='%0.0f',
valinit=params['n_epochs'])
params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
['Channel names visible',
'Event-id visible',
'Epoch-id visible',
'Zeroline visible'],
actives=params['settings'])
update = partial(_update_channels_epochs, params=params)
params['update_button'].on_clicked(update)
labels_callback = partial(_toggle_labels, params=params)
params['checkbox'].on_clicked(labels_callback)
close_callback = partial(_settings_closed, params=params)
params['fig_options'].canvas.mpl_connect('close_event', close_callback)
try:
params['fig_options'].canvas.draw()
params['fig_options'].show(warn=False)
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
except Exception:
pass
def _settings_closed(events, params):
"""Handle close event from settings dialog."""
params['fig_options'] = None
def _plot_histogram(params):
"""Plot histogram of peak-to-peak values."""
import matplotlib.pyplot as plt
epochs = params['epochs']
p2p = np.ptp(epochs.get_data(), axis=2)
types = list()
data = list()
    if 'eeg' in params['types']:
        eegs = np.array([p2p.T[i] for i, x in enumerate(params['types'])
                         if x == 'eeg'])
        data.append(eegs.ravel())
        types.append('eeg')
    if 'mag' in params['types']:
        mags = np.array([p2p.T[i] for i, x in enumerate(params['types'])
                         if x == 'mag'])
        data.append(mags.ravel())
        types.append('mag')
    if 'grad' in params['types']:
        grads = np.array([p2p.T[i] for i, x in enumerate(params['types'])
                         if x == 'grad'])
        data.append(grads.ravel())
        types.append('grad')
params['histogram'] = plt.figure()
scalings = _handle_default('scalings')
units = _handle_default('units')
titles = _handle_default('titles')
colors = _handle_default('color')
for idx in range(len(types)):
ax = plt.subplot(len(types), 1, idx + 1)
plt.xlabel(units[types[idx]])
plt.ylabel('Count')
color = colors[types[idx]]
rej = None
if epochs.reject is not None and types[idx] in epochs.reject:
rej = epochs.reject[types[idx]] * scalings[types[idx]]
rng = [0., rej * 1.1]
else:
rng = None
plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
range=rng)
if rej is not None:
ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
plt.title(titles[types[idx]])
params['histogram'].suptitle('Peak-to-peak histogram', y=0.99)
params['histogram'].subplots_adjust(hspace=0.6)
try:
params['histogram'].show(warn=False)
except Exception:
pass
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
plt.tight_layout(h_pad=0.7, pad=2)
def _label2idx(params, pos):
"""Handle click on labels (returns channel name and idx)."""
labels = params['ax'].yaxis.get_ticklabels()
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1])
text = labels[line_idx].get_text()
if len(text) == 0:
return None, None
ch_idx = params['ch_start'] + line_idx
return text, ch_idx
def _draw_event_lines(params):
"""Draw event lines."""
includes_tzero = False
epochs = params['epochs']
n_times = len(epochs.times)
start_idx = params['t_start'] // n_times
color = params['event_colors']
ax = params['ax']
for ev_line in params['ev_lines']:
ax.lines.remove(ev_line) # clear the view first
for ev_text in params['ev_texts']:
ax.texts.remove(ev_text)
params['ev_texts'] = list()
params['ev_lines'] = list()
t_zero = np.where(epochs.times == 0.)[0] # idx of 0s
if len(t_zero) == 0:
t_zero = epochs.times[0] * -1 * epochs.info['sfreq'] # if tmin > 0
end = params['n_epochs'] + start_idx
samp_times = params['events'][:, 0]
for idx, event in enumerate(epochs.events[start_idx:end]):
event_mask = ((event[0] - t_zero < samp_times) &
(samp_times < event[0] + n_times - t_zero))
for ev in params['events'][event_mask]:
if ev[0] == event[0]:
includes_tzero = True
pos = [idx * n_times + ev[0] - event[0] + t_zero,
idx * n_times + ev[0] - event[0] + t_zero]
kwargs = {} if ev[2] not in color else {'color': color[ev[2]]}
params['ev_lines'].append(ax.plot(pos, ax.get_ylim(),
zorder=3, **kwargs)[0])
params['ev_texts'].append(ax.text(pos[0], ax.get_ylim()[0],
ev[2], color=color[ev[2]],
ha='center', va='top'))
return includes_tzero
| {
"content_hash": "e666dd491680e66f465031dec869069f",
"timestamp": "",
"source": "github",
"line_count": 2041,
"max_line_length": 79,
"avg_line_length": 43.29642332190103,
"alnum_prop": 0.5586071881223973,
"repo_name": "cjayb/mne-python",
"id": "cce5a80dc4aa4bf6c663f56031402333b0be757d",
"size": "88371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/viz/epochs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "7901053"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from tests.test_helper import *
from distutils.version import LooseVersion
import platform
import braintree
import requests
class TestHttp(unittest.TestCase):
if LooseVersion(requests.__version__) >= LooseVersion('1.0.0'):
SSLError = requests.exceptions.SSLError
else:
SSLError = requests.models.SSLError
@staticmethod
def get_http(environment):
config = Configuration(environment, "merchant_id", public_key="public_key", private_key="private_key")
return config.http()
@raises(AuthenticationError)
def test_successful_connection_sandbox(self):
http = self.get_http(Environment.Sandbox)
http.get("/")
@raises(AuthenticationError)
def test_successful_connection_production(self):
http = self.get_http(Environment.Production)
http.get("/")
def test_wrapping_http_exceptions(self):
config = Configuration(
Environment("test", "localhost", "1", False, None, Environment.Production.ssl_certificate),
"integration_merchant_id",
public_key="integration_public_key",
private_key="integration_private_key",
wrap_http_exceptions=True
)
gateway = braintree.braintree_gateway.BraintreeGateway(config)
try:
gateway.transaction.find("my_id")
except braintree.exceptions.unexpected_error.UnexpectedError:
correct_exception = True
except Exception:
correct_exception = False
self.assertTrue(correct_exception)
def test_unsuccessful_connection_to_good_ssl_server_with_wrong_cert(self):
if platform.system() == "Darwin":
return
#any endpoint that returns valid XML with a status of 3xx or less and serves SSL
environment = Environment("test", "aws.amazon.com/ec2", "443", "http://auth.venmo.dev:9292", True, Environment.Production.ssl_certificate)
http = self.get_http(environment)
try:
http.get("/")
except self.SSLError as e:
self.assertTrue("certificate verify failed" in str(e))
except AuthenticationError:
self.fail("Expected to receive an SSL error but received an Authentication Error instead, check your local openssl installation")
else:
self.fail("Expected to receive an SSL error but no exception was raised")
def test_unsuccessful_connection_to_ssl_server_with_wrong_domain(self):
#ip address of api.braintreegateway.com
environment = Environment("test", "204.109.13.121", "443", "http://auth.venmo.dev:9292", True, Environment.Production.ssl_certificate)
http = self.get_http(environment)
try:
http.get("/")
except self.SSLError:
pass
else:
self.fail("Expected to receive an SSL error but no exception was raised")
def test_timeouts(self):
config = Configuration(
Environment.Development,
"integration_merchant_id",
public_key="integration_public_key",
private_key="integration_private_key",
wrap_http_exceptions=True,
timeout=0.001
)
gateway = braintree.braintree_gateway.BraintreeGateway(config)
try:
gateway.transaction.find("my_id")
except braintree.exceptions.http.timeout_error.TimeoutError:
correct_exception = True
except Exception:
correct_exception = False
self.assertTrue(correct_exception)
| {
"content_hash": "c10cae3921e86082d087a92c0d11899c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 146,
"avg_line_length": 37.744680851063826,
"alnum_prop": 0.6457158962795941,
"repo_name": "braintree/braintree_python",
"id": "b87218a01b97d732bdf6133745bdd028de07cffa",
"size": "3548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "252"
},
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "1338636"
},
{
"name": "Ruby",
"bytes": "2099"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
} |
"""
Predict memory usage on parallelized runs.
"""
import argparse
import redmapper
parser = argparse.ArgumentParser(description="Predict memory usage when running in parallel.")
parser.add_argument('-c', '--configfile', action='store', type=str, required=True,
help='YAML config file')
parser.add_argument('-n', '--no_zred', action='store_true',
help='Do not include zred.')
parser.add_argument('-b', '--border_factor', action='store', type=float, required=False,
default=2.0, help='Approximate factor for border mem usage.')
args = parser.parse_args()
mem_predictor = redmapper.pipeline.MemPredict(args.configfile)
mem_predictor.predict_memory(include_zreds=not args.no_zred, border_factor=args.border_factor)
| {
"content_hash": "cc4d27099f57368b67168a000468d90c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 94,
"avg_line_length": 32.541666666666664,
"alnum_prop": 0.6888604353393086,
"repo_name": "erykoff/redmapper",
"id": "512a3a00f02f58df6b5f0d89e4993dc388f80519",
"size": "803",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bin/redmapper_predict_memory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35922"
},
{
"name": "Dockerfile",
"bytes": "1872"
},
{
"name": "Python",
"bytes": "971787"
}
],
"symlink_target": ""
} |
import wx
import PanelData
import PanelAnalysis
import Calculation
class MainFrame(wx.Frame):
"""
    Initialization of MainFrame with PanelData, PanelOption and ButtonStart
"""
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, title="STEN 1.0",
pos=(0, 0), size=(1000, 500))
# Panel: MainFrame
self.panel = wx.Panel(self, wx.ID_ANY)
# Specify BoxSizer
sizerMainV = wx.BoxSizer(wx.VERTICAL)
sizerMainH = wx.BoxSizer(wx.HORIZONTAL)
sizerStart = wx.BoxSizer(wx.HORIZONTAL)
# Panel: Data
self.PanelData = PanelData.CreateDataset(self.panel, self)
sizerMainH.Add(self.PanelData, 1, wx.EXPAND)
# Panel: Option
self.PanelOption = wx.Notebook(self.panel, 1, style=wx.NB_TOP)
self.AnovaWave = PanelAnalysis.AnovaWave(self.PanelOption, self)
self.AnovaIS = PanelAnalysis.AnovaIS(self.PanelOption, self)
self.PanelOption.AddPage(self.AnovaWave, 'ANOVA on Wave/GFP')
self.PanelOption.AddPage(self.AnovaIS, 'ANOVA in Brain Space')
self.AnovaWave.SetFocus()
sizerMainH.Add(self.PanelOption, 1, wx.EXPAND)
self.panel.SetSizer(sizerMainH)
sizerMainV.Add(self.panel, 1, wx.EXPAND)
# Button: Start
PanelStart = wx.Panel(self, wx.ID_ANY)
self.ButtonStart = wx.Button(PanelStart, wx.ID_ANY,
label="Start Calculation")
sizerStart.Add(self.ButtonStart, wx.ID_ANY, wx.EXPAND)
PanelStart.SetSizer(sizerStart)
sizerMainV.Add(PanelStart, 0, wx.EXPAND)
self.SetSizerAndFit(sizerMainV)
self.Show(True)
# Specification of events
self.Bind(wx.EVT_CLOSE, self.onClose)
wx.EVT_BUTTON(self, self.ButtonStart.Id, self.startAction)
self.Bind(wx.EVT_CHAR_HOOK, self.onKeyDown)
self.PanelData.SetFocus()
# MenuBar
menuBar = wx.MenuBar()
menu = wx.Menu()
m_exit = menu.Append(wx.ID_EXIT, "E&xit\tAlt-X",
"Close window and exit program.")
self.Bind(wx.EVT_MENU, self.onClose, m_exit)
menuBar.Append(menu, "&File")
menu = wx.Menu()
m_about = menu.Append(wx.ID_ABOUT, "&About", "Information about STEN")
self.Bind(wx.EVT_MENU, self.onAbout, m_about)
menuBar.Append(menu, "&Help")
self.SetMenuBar(menuBar)
def onKeyDown(self, event):
"""Key event handler if key is pressed within frame"""
keycode = event.GetKeyCode()
if keycode == wx.WXK_ESCAPE: # If ESC is pressed
self.onClose(event)
else:
event.Skip()
def onAbout(self, event):
"""Show about message when About Menu opened"""
dlg = wx.MessageDialog(
self,
"For more Information about STEN, go to " +
"https://github.com/jknebel/STEN",
"About STEN", wx.OK)
dlg.ShowModal()
dlg.Destroy()
event.Skip()
def onClose(self, event):
"""Show exit message when MainFrame gets closed"""
dlg = wx.MessageDialog(
self, "Do you really want to close STEN?", "Exit STEN",
wx.OK | wx.CANCEL | wx.ICON_QUESTION)
answer = dlg.ShowModal()
dlg.Destroy()
if answer == wx.ID_OK:
self.DestroyChildren()
self.Destroy()
event.Skip()
def startAction(self, event):
"""Starts the calculation"""
# Make sure that a dataset is present and saved before continuing
startCalculation = False
if self.Dataset == {}:
dlg = wx.MessageDialog(
self, caption='No dataset loaded',
message='No dataset is loaded. Create a new dataset or load ' +
'an already existing one to continue.',
style=wx.OK | wx.ICON_QUESTION)
dlg.ShowModal()
dlg.Destroy()
elif self.PanelData.TextResult.GetValue() == '':
dlg = wx.MessageDialog(
self, caption='No result folder selected',
message='Results folder was not indicated. Please specify ' +
'where the results of the computation should be ' +
'stored at.',
style=wx.OK | wx.ICON_QUESTION)
dlg.ShowModal()
dlg.Destroy()
self.PanelData.resultFolder(event)
elif self.AnovaWave.BoxAnalyse.GetSelection() == 0 \
and self.PanelOption.GetSelection() == 0:
dlg = wx.MessageDialog(
                self, caption='No Analysis Type selected',
message='Please select one of the following analysis' +
' types:\n%s' % ', '.join(
self.AnovaWave.BoxAnalyse.GetItems()[1:]),
style=wx.OK | wx.ICON_QUESTION)
dlg.ShowModal()
dlg.Destroy()
elif self.Dataset != {} and not self.saved:
dlg = wx.MessageDialog(
self, caption='Unsaved Dataset',
message='Are you sure that you want to continue ' +
'without saving the loaded dataset first?',
style=wx.OK | wx.CANCEL | wx.ICON_QUESTION)
answer = dlg.ShowModal()
dlg.Destroy()
if answer == wx.ID_OK:
startCalculation = True
else:
startCalculation = True
# Start Calculation
if startCalculation:
self.ButtonStart.Disable()
calc = Calculation.Start(self)
self.PanelData.TxtProgress.SetLabel('\n'.join(calc.progressTxt))
self.ButtonStart.Enable()
event.Skip()
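# Minimal launch sketch, assuming wxPython classic (matching the wx.EVT_BUTTON
# call style used above) and that PanelData, PanelAnalysis and Calculation
# import cleanly alongside this module.
if __name__ == '__main__':
    app = wx.App(False)
    frame = MainFrame()  # the frame shows itself in __init__
    app.MainLoop()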
| {
"content_hash": "5189c5a3a84aeaa4f5b7fdc738e8722e",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 79,
"avg_line_length": 39.16447368421053,
"alnum_prop": 0.548966907441626,
"repo_name": "jknebel/STEN",
"id": "ee8a6d06419ff992d3bf3b6acb31c3245ec56c6d",
"size": "5953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Interface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "262"
},
{
"name": "Python",
"bytes": "367671"
}
],
"symlink_target": ""
} |
from enum import Enum
LEARN_DUMMY = None
LEARN_RYTHM_MULTIPLIER = '2'
LEARN_BASE_RYTHM = '2'
LEARN_MAX_WORDS = '5'
available_settings = Enum('available_settings', 'LEARN_RYTHM_MULTIPLIER LEARN_BASE_RYTHM LEARN_MAX_WORDS')
| {
"content_hash": "9e639d0fbf0ab88e4be2c3de72401548",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 106,
"avg_line_length": 28,
"alnum_prop": 0.7455357142857143,
"repo_name": "Aigrefin/py3learn",
"id": "f55d5982f0cc0c5a9ae830671d6557f515258573",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learn/learn_base_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1112"
},
{
"name": "HTML",
"bytes": "13577"
},
{
"name": "Python",
"bytes": "105197"
}
],
"symlink_target": ""
} |
import unittest
from os.path import abspath, dirname, join
import sys
DIR_TESTS = abspath(dirname(__file__))
DIR_LIB = join(dirname(DIR_TESTS), 'b2tob3')
sys.path.insert(0, DIR_LIB)
from b2tob3.b2tob3 import make_replacements
def get_replacement(fname):
with open(join(DIR_TESTS, fname), 'r') as f:
content = f.read()
return make_replacements(content)
class B2tob3TestSuite(unittest.TestCase):
"""b2tob3 test cases."""
def test_html_count(self):
(content, count) = get_replacement('fluid.html')
self.assertEqual(count, 22)
def test_html_match(self):
(content, count) = get_replacement('fluid.html')
self.assertRegexpMatches(content, 'col-md-3') | {
"content_hash": "bfab048b1264611fc4fc1e38381da13d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 56,
"avg_line_length": 25.464285714285715,
"alnum_prop": 0.6774193548387096,
"repo_name": "yaph/b2tob3",
"id": "f652e657c35ab128e3e56535791923edcd15de62",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_replacements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13647"
},
{
"name": "Shell",
"bytes": "6701"
}
],
"symlink_target": ""
} |
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %d cheeses!" % cheese_count
    print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
cheese_and_crackers(20, 30)
print "OR, we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(10 + 20, 5 + 6)
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| {
"content_hash": "2c002141a5562cb51f9448564d39902a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 34.35,
"alnum_prop": 0.7234352256186317,
"repo_name": "1uk/LPTHW",
"id": "7687838950d91da8d56daac168e53659f9297cd3",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex19.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28466"
}
],
"symlink_target": ""
} |
import logging
import threading
import eventlet
from eventlet import greenpool
from conveyor.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
    Calls the :class:`ThreadGroup` to notify it.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
"""Wrapper around a greenthread, that holds a reference to the
    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
    it has finished so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
def link(self, func, *args, **kwargs):
self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
return th
def thread_done(self, thread):
self.threads.remove(thread)
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
continue
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
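# Minimal usage sketch of the ThreadGroup API described in the docstrings above;
# the callbacks, interval and sleep duration below are illustrative placeholders.
if __name__ == '__main__':
    results = []
    tg = ThreadGroup(thread_pool_size=2)
    tg.add_thread(results.append, 'worker ran')       # one-off greenthread
    tg.add_timer(0.5, results.append, None, 'tick')   # fixed-interval timer
    eventlet.sleep(1.2)                               # let the hub schedule them
    tg.stop(graceful=True)                            # stops timers, then waits for threads
    LOG.info('collected: %s', results)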
| {
"content_hash": "afa7dad60fe22e00ce1c638e25fb0f92",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 31.066176470588236,
"alnum_prop": 0.5798816568047337,
"repo_name": "Hybrid-Cloud/conveyor",
"id": "9281a79540c637a6a47effa0e2a00e882d5e8a70",
"size": "4830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conveyor/common/threadgroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3789174"
},
{
"name": "Shell",
"bytes": "16567"
}
],
"symlink_target": ""
} |
from collections.abc import Iterable, Mapping
from numbers import Real, Integral
from pathlib import Path
import subprocess
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.checkvalue as cv
from ._xml import clean_indentation, reorder_attributes
from .mixin import IDManagerMixin
_BASES = ['xy', 'xz', 'yz']
_SVG_COLORS = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50)
}
class Plot(IDManagerMixin):
"""Definition of a finite region of space to be plotted.
OpenMC is capable of generating two-dimensional slice plots and
three-dimensional voxel plots. Colors that are used in plots can be given as
RGB tuples, e.g. (255, 255, 255) would be white, or by a string indicating a
valid `SVG color <https://www.w3.org/TR/SVG11/types.html#ColorKeywords>`_.
Parameters
----------
plot_id : int
Unique identifier for the plot
name : str
Name of the plot
Attributes
----------
id : int
Unique identifier
name : str
Name of the plot
width : Iterable of float
Width of the plot in each basis direction
pixels : Iterable of int
Number of pixels to use in each basis direction
origin : tuple or list of ndarray
Origin (center) of the plot
    filename : str
        Path to write the plot to
color_by : {'cell', 'material'}
Indicate whether the plot should be colored by cell or by material
type : {'slice', 'voxel'}
The type of the plot
basis : {'xy', 'xz', 'yz'}
The basis directions for the plot
background : Iterable of int or str
Color of the background
mask_components : Iterable of openmc.Cell or openmc.Material
The cells or materials to plot
mask_background : Iterable of int or str
Color to apply to all cells/materials not listed in mask_components
show_overlaps : bool
        Indicate whether or not overlapping regions are shown
overlap_color : Iterable of int or str
Color to apply to overlapping regions
colors : dict
Dictionary indicating that certain cells/materials (keys) should be
displayed with a particular color.
level : int
Universe depth to plot at
meshlines : dict
Dictionary defining type, id, linewidth and color of a mesh to be
plotted on top of a plot
"""
next_id = 1
used_ids = set()
def __init__(self, plot_id=None, name=''):
# Initialize Plot class attributes
self.id = plot_id
self.name = name
self._width = [4.0, 4.0]
self._pixels = [400, 400]
self._origin = [0., 0., 0.]
self._filename = None
self._color_by = 'cell'
self._type = 'slice'
self._basis = 'xy'
self._background = None
self._mask_components = None
self._mask_background = None
self._show_overlaps = False
self._overlap_color = None
self._colors = {}
self._level = None
self._meshlines = None
@property
def name(self):
return self._name
@property
def width(self):
return self._width
@property
def pixels(self):
return self._pixels
@property
def origin(self):
return self._origin
@property
def filename(self):
return self._filename
@property
def color_by(self):
return self._color_by
@property
def type(self):
return self._type
@property
def basis(self):
return self._basis
@property
def background(self):
return self._background
@property
def mask_components(self):
return self._mask_components
@property
def mask_background(self):
return self._mask_background
@property
def show_overlaps(self):
return self._show_overlaps
@property
def overlap_color(self):
return self._overlap_color
@property
def colors(self):
return self._colors
@property
def level(self):
return self._level
@property
def meshlines(self):
return self._meshlines
@name.setter
def name(self, name):
cv.check_type('plot name', name, str)
self._name = name
@width.setter
def width(self, width):
cv.check_type('plot width', width, Iterable, Real)
cv.check_length('plot width', width, 2, 3)
self._width = width
@origin.setter
def origin(self, origin):
cv.check_type('plot origin', origin, Iterable, Real)
cv.check_length('plot origin', origin, 3)
self._origin = origin
@pixels.setter
def pixels(self, pixels):
cv.check_type('plot pixels', pixels, Iterable, Integral)
cv.check_length('plot pixels', pixels, 2, 3)
for dim in pixels:
cv.check_greater_than('plot pixels', dim, 0)
self._pixels = pixels
@filename.setter
def filename(self, filename):
cv.check_type('filename', filename, str)
self._filename = filename
@color_by.setter
def color_by(self, color_by):
cv.check_value('plot color_by', color_by, ['cell', 'material'])
self._color_by = color_by
@type.setter
def type(self, plottype):
cv.check_value('plot type', plottype, ['slice', 'voxel'])
self._type = plottype
@basis.setter
def basis(self, basis):
cv.check_value('plot basis', basis, _BASES)
self._basis = basis
@background.setter
def background(self, background):
self._check_color('plot background', background)
self._background = background
@colors.setter
def colors(self, colors):
cv.check_type('plot colors', colors, Mapping)
for key, value in colors.items():
cv.check_type('plot color key', key, (openmc.Cell, openmc.Material))
self._check_color('plot color value', value)
self._colors = colors
@mask_components.setter
def mask_components(self, mask_components):
cv.check_type('plot mask components', mask_components, Iterable,
(openmc.Cell, openmc.Material))
self._mask_components = mask_components
@mask_background.setter
def mask_background(self, mask_background):
self._check_color('plot mask background', mask_background)
self._mask_background = mask_background
@show_overlaps.setter
def show_overlaps(self, show_overlaps):
cv.check_type(f'Show overlaps flag for Plot ID="{self.id}"',
show_overlaps, bool)
self._show_overlaps = show_overlaps
@overlap_color.setter
def overlap_color(self, overlap_color):
self._check_color('plot overlap color', overlap_color)
self._overlap_color = overlap_color
@level.setter
def level(self, plot_level):
cv.check_type('plot level', plot_level, Integral)
cv.check_greater_than('plot level', plot_level, 0, equality=True)
self._level = plot_level
@meshlines.setter
def meshlines(self, meshlines):
cv.check_type('plot meshlines', meshlines, dict)
if 'type' not in meshlines:
msg = f'Unable to set the meshlines to "{meshlines}" which ' \
'does not have a "type" key'
raise ValueError(msg)
elif meshlines['type'] not in ['tally', 'entropy', 'ufs', 'cmfd']:
msg = 'Unable to set the meshlines with ' \
'type "{}"'.format(meshlines['type'])
raise ValueError(msg)
if 'id' in meshlines:
cv.check_type('plot meshlines id', meshlines['id'], Integral)
cv.check_greater_than('plot meshlines id', meshlines['id'], 0,
equality=True)
if 'linewidth' in meshlines:
cv.check_type('plot mesh linewidth', meshlines['linewidth'], Integral)
cv.check_greater_than('plot mesh linewidth', meshlines['linewidth'],
0, equality=True)
if 'color' in meshlines:
self._check_color('plot meshlines color', meshlines['color'])
self._meshlines = meshlines
@staticmethod
def _check_color(err_string, color):
cv.check_type(err_string, color, Iterable)
if isinstance(color, str):
if color.lower() not in _SVG_COLORS:
raise ValueError(f"'{color}' is not a valid color.")
else:
cv.check_length(err_string, color, 3)
for rgb in color:
cv.check_type(err_string, rgb, Real)
cv.check_greater_than('RGB component', rgb, 0, True)
cv.check_less_than('RGB component', rgb, 256)
def __repr__(self):
string = 'Plot\n'
string += '{: <16}=\t{}\n'.format('\tID', self._id)
string += '{: <16}=\t{}\n'.format('\tName', self._name)
string += '{: <16}=\t{}\n'.format('\tFilename', self._filename)
string += '{: <16}=\t{}\n'.format('\tType', self._type)
string += '{: <16}=\t{}\n'.format('\tBasis', self._basis)
string += '{: <16}=\t{}\n'.format('\tWidth', self._width)
string += '{: <16}=\t{}\n'.format('\tOrigin', self._origin)
string += '{: <16}=\t{}\n'.format('\tPixels', self._pixels)
string += '{: <16}=\t{}\n'.format('\tColor by', self._color_by)
string += '{: <16}=\t{}\n'.format('\tBackground', self._background)
string += '{: <16}=\t{}\n'.format('\tMask components',
self._mask_components)
string += '{: <16}=\t{}\n'.format('\tMask background',
self._mask_background)
string += '{: <16}=\t{}\n'.format('\tOverlap Color',
self._overlap_color)
string += '{: <16}=\t{}\n'.format('\tColors', self._colors)
string += '{: <16}=\t{}\n'.format('\tLevel', self._level)
string += '{: <16}=\t{}\n'.format('\tMeshlines', self._meshlines)
return string
@classmethod
def from_geometry(cls, geometry, basis='xy', slice_coord=0.):
"""Return plot that encompasses a geometry.
Parameters
----------
geometry : openmc.Geometry
The geometry to base the plot off of
basis : {'xy', 'xz', 'yz'}
The basis directions for the plot
slice_coord : float
The level at which the slice plot should be plotted. For example, if
the basis is 'xy', this would indicate the z value used in the
origin.
"""
cv.check_type('geometry', geometry, openmc.Geometry)
cv.check_value('basis', basis, _BASES)
# Decide which axes to keep
if basis == 'xy':
pick_index = (0, 1)
slice_index = 2
elif basis == 'yz':
pick_index = (1, 2)
slice_index = 0
elif basis == 'xz':
pick_index = (0, 2)
slice_index = 1
# Get lower-left and upper-right coordinates for desired axes
lower_left, upper_right = geometry.bounding_box
lower_left = lower_left[np.array(pick_index)]
upper_right = upper_right[np.array(pick_index)]
if np.any(np.isinf((lower_left, upper_right))):
raise ValueError('The geometry does not appear to be bounded '
f'in the {basis} plane.')
plot = cls()
plot.origin = np.insert((lower_left + upper_right)/2,
slice_index, slice_coord)
plot.width = upper_right - lower_left
return plot
def colorize(self, geometry, seed=1):
"""Generate a color scheme for each domain in the plot.
This routine may be used to generate random, reproducible color schemes.
The colors generated are based upon cell/material IDs in the geometry.
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plot is defined
seed : Integral
The random number seed used to generate the color scheme
"""
cv.check_type('geometry', geometry, openmc.Geometry)
cv.check_type('seed', seed, Integral)
cv.check_greater_than('seed', seed, 1, equality=True)
# Get collections of the domains which will be plotted
if self.color_by == 'material':
domains = geometry.get_all_materials().values()
else:
domains = geometry.get_all_cells().values()
# Set the seed for the random number generator
np.random.seed(seed)
# Generate random colors for each feature
for domain in domains:
self.colors[domain] = np.random.randint(0, 256, (3,))
def highlight_domains(self, geometry, domains, seed=1,
alpha=0.5, background='gray'):
"""Use alpha compositing to highlight one or more domains in the plot.
This routine generates a color scheme and applies alpha compositing to
make all domains except the highlighted ones appear partially
transparent.
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plot is defined
domains : Iterable of openmc.Cell or openmc.Material
            A collection of the domains to highlight in the plot
seed : int
The random number seed used to generate the color scheme
alpha : float
            The value between 0 and 1 to apply in alpha compositing
        background : 3-tuple of int or str
            The background color to apply in alpha compositing
"""
cv.check_type('domains', domains, Iterable,
(openmc.Cell, openmc.Material))
cv.check_type('alpha', alpha, Real)
cv.check_greater_than('alpha', alpha, 0., equality=True)
cv.check_less_than('alpha', alpha, 1., equality=True)
cv.check_type('background', background, Iterable)
# Get a background (R,G,B) tuple to apply in alpha compositing
if isinstance(background, str):
if background.lower() not in _SVG_COLORS:
raise ValueError(f"'{background}' is not a valid color.")
background = _SVG_COLORS[background.lower()]
# Generate a color scheme
self.colorize(geometry, seed)
# Apply alpha compositing to the colors for all domains
# other than those the user wishes to highlight
for domain, color in self.colors.items():
if domain not in domains:
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
r, g, b = color
r = int(((1-alpha) * background[0]) + (alpha * r))
g = int(((1-alpha) * background[1]) + (alpha * g))
b = int(((1-alpha) * background[2]) + (alpha * b))
self._colors[domain] = (r, g, b)
def to_xml_element(self):
"""Return XML representation of the plot
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing plot data
"""
element = ET.Element("plot")
element.set("id", str(self._id))
if self._filename is not None:
element.set("filename", self._filename)
element.set("color_by", self._color_by)
element.set("type", self._type)
if self._type == 'slice':
element.set("basis", self._basis)
subelement = ET.SubElement(element, "origin")
subelement.text = ' '.join(map(str, self._origin))
subelement = ET.SubElement(element, "width")
subelement.text = ' '.join(map(str, self._width))
subelement = ET.SubElement(element, "pixels")
subelement.text = ' '.join(map(str, self._pixels))
if self._background is not None:
subelement = ET.SubElement(element, "background")
color = self._background
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
subelement.text = ' '.join(str(x) for x in color)
if self._colors:
for domain, color in sorted(self._colors.items(),
key=lambda x: x[0].id):
subelement = ET.SubElement(element, "color")
subelement.set("id", str(domain.id))
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
subelement.set("rgb", ' '.join(str(x) for x in color))
if self._mask_components is not None:
subelement = ET.SubElement(element, "mask")
subelement.set("components", ' '.join(
str(d.id) for d in self._mask_components))
color = self._mask_background
if color is not None:
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
subelement.set("background", ' '.join(
str(x) for x in color))
if self._show_overlaps:
subelement = ET.SubElement(element, "show_overlaps")
subelement.text = "true"
if self._overlap_color is not None:
color = self._overlap_color
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
subelement = ET.SubElement(element, "overlap_color")
subelement.text = ' '.join(str(x) for x in color)
if self._level is not None:
subelement = ET.SubElement(element, "level")
subelement.text = str(self._level)
if self._meshlines is not None:
subelement = ET.SubElement(element, "meshlines")
subelement.set("meshtype", self._meshlines['type'])
if self._meshlines['id'] is not None:
subelement.set("id", str(self._meshlines['id']))
if self._meshlines['linewidth'] is not None:
subelement.set("linewidth", str(self._meshlines['linewidth']))
if self._meshlines['color'] is not None:
subelement.set("color", ' '.join(map(
str, self._meshlines['color'])))
return element
def to_ipython_image(self, openmc_exec='openmc', cwd='.',
convert_exec='convert'):
"""Render plot as an image
This method runs OpenMC in plotting mode to produce a bitmap image which
is then converted to a .png file and loaded in as an
:class:`IPython.display.Image` object. As such, it requires that your
model geometry, materials, and settings have already been exported to
XML.
Parameters
----------
openmc_exec : str
Path to OpenMC executable
cwd : str, optional
Path to working directory to run in
convert_exec : str, optional
Command that can convert PPM files into PNG files
Returns
-------
IPython.display.Image
Image generated
"""
from IPython.display import Image
# Create plots.xml
Plots([self]).export_to_xml()
# Run OpenMC in geometry plotting mode
openmc.plot_geometry(False, openmc_exec, cwd)
# Convert to .png
if self.filename is not None:
ppm_file = f'{self.filename}.ppm'
else:
ppm_file = f'plot_{self.id}.ppm'
png_file = ppm_file.replace('.ppm', '.png')
subprocess.check_call([convert_exec, ppm_file, png_file])
return Image(png_file)
class Plots(cv.CheckedList):
"""Collection of Plots used for an OpenMC simulation.
This class corresponds directly to the plots.xml input file. It can be
thought of as a normal Python list where each member is a :class:`Plot`. It
behaves like a list as the following example demonstrates:
>>> xz_plot = openmc.Plot()
>>> big_plot = openmc.Plot()
>>> small_plot = openmc.Plot()
>>> p = openmc.Plots((xz_plot, big_plot))
>>> p.append(small_plot)
>>> small_plot = p.pop()
Parameters
----------
plots : Iterable of openmc.Plot
Plots to add to the collection
"""
def __init__(self, plots=None):
super().__init__(Plot, 'plots collection')
self._plots_file = ET.Element("plots")
if plots is not None:
self += plots
def append(self, plot):
"""Append plot to collection
Parameters
----------
plot : openmc.Plot
Plot to append
"""
super().append(plot)
def insert(self, index, plot):
"""Insert plot before index
Parameters
----------
index : int
Index in list
plot : openmc.Plot
Plot to insert
"""
super().insert(index, plot)
def colorize(self, geometry, seed=1):
"""Generate a consistent color scheme for each domain in each plot.
This routine may be used to generate random, reproducible color schemes.
The colors generated are based upon cell/material IDs in the geometry.
The color schemes will be consistent for all plots in "plots.xml".
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plots are defined
seed : Integral
The random number seed used to generate the color scheme
"""
for plot in self:
plot.colorize(geometry, seed)
def highlight_domains(self, geometry, domains, seed=1,
alpha=0.5, background='gray'):
"""Use alpha compositing to highlight one or more domains in the plot.
This routine generates a color scheme and applies alpha compositing to
make all domains except the highlighted ones appear partially
transparent.
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plot is defined
domains : Iterable of openmc.Cell or openmc.Material
            A collection of the domains to highlight in the plot
seed : int
The random number seed used to generate the color scheme
alpha : float
            The value between 0 and 1 to apply in alpha compositing
        background : 3-tuple of int or str
            The background color to apply in alpha compositing
"""
for plot in self:
plot.highlight_domains(geometry, domains, seed, alpha, background)
def _create_plot_subelements(self):
for plot in self:
xml_element = plot.to_xml_element()
if len(plot.name) > 0:
self._plots_file.append(ET.Comment(plot.name))
self._plots_file.append(xml_element)
def export_to_xml(self, path='plots.xml'):
"""Export plot specifications to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'plots.xml'.
"""
# Reset xml element tree
self._plots_file.clear()
self._create_plot_subelements()
# Clean the indentation in the file to be user-readable
clean_indentation(self._plots_file)
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'plots.xml'
# Write the XML Tree to the plots.xml file
reorder_attributes(self._plots_file) # TODO: Remove when support is Python 3.8+
tree = ET.ElementTree(self._plots_file)
tree.write(str(p), xml_declaration=True, encoding='utf-8')
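# Minimal usage sketch of the Plot/Plots classes above; the sphere/cell geometry
# and the color choice are stand-ins so the example is self-contained.
if __name__ == '__main__':
    sphere = openmc.Sphere(r=10.0, boundary_type='vacuum')
    cell = openmc.Cell(region=-sphere)
    geometry = openmc.Geometry(openmc.Universe(cells=[cell]))
    plot = Plot.from_geometry(geometry, basis='xy')  # slice centered on the bounding box
    plot.pixels = (600, 600)
    plot.color_by = 'cell'
    plot.colors = {cell: 'steelblue'}                # SVG color name or RGB tuple
    Plots([plot]).export_to_xml('plots.xml')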
| {
"content_hash": "e1c9f3026a4cb1b1e37af65207ab9086",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 88,
"avg_line_length": 33.64193168433451,
"alnum_prop": 0.5640711434773475,
"repo_name": "amandalund/openmc",
"id": "0abff20d7b3ffc2d220e62663c7f6ae3523e209a",
"size": "28562",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "openmc/plots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8552"
},
{
"name": "C++",
"bytes": "1273951"
},
{
"name": "CMake",
"bytes": "27805"
},
{
"name": "Dockerfile",
"bytes": "1427"
},
{
"name": "Python",
"bytes": "2603788"
},
{
"name": "Shell",
"bytes": "2519"
}
],
"symlink_target": ""
} |
import datetime
import json
import lib.pushdata
import lib.getconfig
import lib.commonclient
import lib.puylogger
jolokia_url = lib.getconfig.getparam('Cassandra', 'jolokia')
cluster_name = lib.getconfig.getparam('SelfConfig', 'cluster_name')
check_type = 'cassandra'
reaction = -3
def runcheck():
local_vars = []
try:
rate = lib.record_rate.ValueRate()
timestamp = int(datetime.datetime.now().strftime("%s"))
jolo_mbeans = ('java.lang:type=Memory', 'org.apache.cassandra.db:type=Caches',
'org.apache.cassandra.transport:type=Native-Transport-Requests',
'org.apache.cassandra.request:type=*',
'org.apache.cassandra.metrics:type=Compaction,name=*',
'org.apache.cassandra.internal:type=GossipStage')
for beans in jolo_mbeans:
jolo_json = json.loads(lib.commonclient.httpget(__name__, jolokia_url+'/'+beans))
jolo_keys = jolo_json['value']
if beans == 'org.apache.cassandra.metrics:type=Compaction,name=*':
mon_values = jolo_keys['org.apache.cassandra.metrics:name=PendingTasks,type=Compaction']['Value']
name = 'cassa_pending_compactions'
local_vars.append({'name': name.lower(), 'timestamp': timestamp, 'value': mon_values, 'check_type': check_type})
elif beans == 'org.apache.cassandra.db:type=Caches':
needed_stats=('RowCacheHits','KeyCacheHits','RowCacheRequests','KeyCacheRequests')
for my_name in jolo_keys:
my_value = jolo_keys[my_name]
if my_name in needed_stats and my_value > 0:
name = 'cassa_' + my_name
value_rate = rate.record_value_rate(name, my_value, timestamp)
local_vars.append({'name': name.lower(), 'timestamp': timestamp, 'value': value_rate, 'check_type': check_type, 'chart_type': 'Rate'})
request_keys = ('RequestResponseStage','ReadStage','MutationStage')
if beans == 'org.apache.cassandra.request:type=*':
for key in request_keys:
name = 'cassa_' + key.lower()
value = jolo_keys['org.apache.cassandra.request:type='+key]['CompletedTasks']
value_rate = rate.record_value_rate(name, value, timestamp)
local_vars.append({'name': name.lower(), 'timestamp': timestamp, 'value': value_rate, 'check_type': check_type, 'chart_type': 'Rate'})
if beans == 'org.apache.cassandra.transport:type=Native-Transport-Requests':
name = 'cassa_native_transport_requests'
value = jolo_json['value']['CompletedTasks']
value_rate = rate.record_value_rate(name, value, timestamp)
local_vars.append({'name': name.lower(), 'timestamp': timestamp, 'value': value_rate, 'check_type': check_type, 'chart_type': 'Rate'})
data_dict = json.loads(lib.commonclient.httpget(__name__, jolokia_url + '/java.lang:type=GarbageCollector,name=*'))
ConcurrentMarkSweep = 'java.lang:name=ConcurrentMarkSweep,type=GarbageCollector'
G1Gc = 'java.lang:name=G1 Young Generation,type=GarbageCollector'
if ConcurrentMarkSweep in data_dict['value']:
CMS = True
G1 = False
elif G1Gc in data_dict['value']:
CMS = False
G1 = True
else:
CMS = False
G1 = False
heam_mem = 'java.lang:type=Memory'
jolo_json = json.loads(lib.commonclient.httpget(__name__, jolokia_url + '/' + heam_mem))
jolo_keys = jolo_json['value']
metr_name = ('used', 'committed', 'max')
heap_type = ('NonHeapMemoryUsage', 'HeapMemoryUsage')
for heap in heap_type:
for metr in metr_name:
if heap == 'NonHeapMemoryUsage':
key = 'cassa_nonheap_' + metr
mon_values = jolo_keys[heap][metr]
if metr == 'used':
local_vars.append({'name': key, 'timestamp': timestamp, 'value': mon_values, 'check_type': check_type})
else:
local_vars.append({'name': key, 'timestamp': timestamp, 'value': mon_values, 'check_type': check_type, 'reaction': reaction})
else:
key = 'cassa_heap_' + metr
mon_values = jolo_keys[heap][metr]
if metr == 'used':
local_vars.append({'name': key, 'timestamp': timestamp, 'value': mon_values, 'check_type': check_type})
else:
local_vars.append({'name': key, 'timestamp': timestamp, 'value': mon_values, 'check_type': check_type, 'reaction': reaction})
if CMS is True:
collector = ('java.lang:name=ParNew,type=GarbageCollector', 'java.lang:name=ConcurrentMarkSweep,type=GarbageCollector')
for coltype in collector:
beans = json.loads(lib.commonclient.httpget(__name__, jolokia_url + '/' + coltype))
if beans['value']['LastGcInfo']:
LastGcInfo = beans['value']['LastGcInfo']['duration']
CollectionCount = beans['value']['CollectionCount']
CollectionTime = beans['value']['CollectionTime']
def push_metrics(preffix):
local_vars.append({'name': 'cassa_' + preffix + '_collection_count', 'timestamp': timestamp, 'value': CollectionCount, 'check_type': check_type})
CollectionTime_rate = rate.record_value_rate('cassa_' + preffix + '_collection_time', CollectionTime, timestamp)
local_vars.append({'name': 'cassa_' + preffix + '_collection_time', 'timestamp': timestamp, 'value': CollectionTime_rate, 'check_type': check_type, 'chart_type': 'Rate'})
if 'LastGcInfo' in locals():
local_vars.append({'name': 'cassa_' + preffix + '_lastgcinfo', 'timestamp': timestamp, 'value': LastGcInfo, 'check_type': check_type})
if coltype == 'java.lang:name=ConcurrentMarkSweep,type=GarbageCollector':
push_metrics(preffix='cms')
if coltype == 'java.lang:name=ParNew,type=GarbageCollector':
push_metrics(preffix='parnew')
if G1 is True:
gc_g1 = ('/java.lang:name=G1%20Old%20Generation,type=GarbageCollector', '/java.lang:name=G1%20Young%20Generation,type=GarbageCollector')
def check_null(value):
if value is None:
value = 0
return value
else:
return value
for k, v in enumerate(gc_g1):
j = json.loads(lib.commonclient.httpget(__name__, jolokia_url + v))
name = 'LastGcInfo'
                if k == 0:
try:
value = j['value'][name]['duration']
v = check_null(value)
except:
v = 0
pass
m_name = 'cassa_G1_old_LastGcInfo'
                if k == 1:
value = j['value'][name]['duration']
v = check_null(value)
m_name = 'cassa_G1_young_LastGcInfo'
local_vars.append({'name': m_name, 'timestamp': timestamp, 'value': v, 'check_type': check_type})
metr_keys = ('CollectionTime', 'CollectionCount')
for k, v in enumerate(gc_g1):
j = json.loads(lib.commonclient.httpget(__name__, jolokia_url + v))
                if k == 0:
type = '_old_'
                if k == 1:
type = '_young_'
for ky, vl in enumerate(metr_keys):
                    if ky == 0:
                        value = j['value'][vl]
                        v = check_null(value)
                        rate_key = vl + type
                        CollectionTime_rate = rate.record_value_rate('cassa_' + rate_key, v, timestamp)
                        # push the computed rate as the metric value, like the other rate metrics above
                        local_vars.append({'name': 'cassa_g1' + type + vl, 'timestamp': timestamp, 'value': CollectionTime_rate, 'check_type': check_type, 'chart_type': 'Rate'})
                    if ky == 1:
                        value = j['value'][vl]
                        v = check_null(value)
                        local_vars.append({'name': 'cassa_g1' + type + vl, 'timestamp': timestamp, 'value': v, 'check_type': check_type})
jolo_threads = 'java.lang:type=Threading'
jolo_tjson = json.loads(lib.commonclient.httpget(__name__, jolokia_url + '/' + jolo_threads))
thread_metrics = ('TotalStartedThreadCount', 'PeakThreadCount', 'ThreadCount', 'DaemonThreadCount')
for thread_metric in thread_metrics:
name = 'cassa_' + thread_metric.lower()
vlor = jolo_tjson['value'][thread_metric]
local_vars.append({'name': name, 'timestamp': timestamp, 'value': vlor, 'check_type': check_type})
return local_vars
except Exception as e:
lib.puylogger.print_message(__name__ + ' Error : ' + str(e))
pass
| {
"content_hash": "a84b880527fcaca9abd6b66f1cac0a1b",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 190,
"avg_line_length": 55.22754491017964,
"alnum_prop": 0.5435324731649138,
"repo_name": "net-angels/puypuy",
"id": "3f1d4ee29b79bc501097cf12f4eee99227a31386",
"size": "9223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checks_available/check_cassandra.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "260"
},
{
"name": "Python",
"bytes": "201565"
},
{
"name": "Shell",
"bytes": "1648"
}
],
"symlink_target": ""
} |
from src.PincherArm import PincherArm
from time import sleep
# set up arm
arm = PincherArm()
# move arm straight up
arm.init_arm()
# wait 3 seconds
sleep(3)
# move arm to shutdown position
arm.shutdown_position()
# kill power to arm
arm.servos.turn_off()
| {
"content_hash": "dcc73824a0ce0c1a697f404535a154f9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 37,
"avg_line_length": 19.692307692307693,
"alnum_prop": 0.75,
"repo_name": "nick-zanobini/nick-zanobini.github.io",
"id": "1c5fc0a3a33d5990433286968cc28fe7a9137c41",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assets/PhantomX-Pincher-Arm/ArmTest/basic_move_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93492"
},
{
"name": "HTML",
"bytes": "60089"
},
{
"name": "JavaScript",
"bytes": "56007"
},
{
"name": "Python",
"bytes": "6562"
},
{
"name": "Ruby",
"bytes": "2189"
}
],
"symlink_target": ""
} |
'''
This script reads data from various sources to process and store in MongoDB.
'''
import os
import pyexcel
import logging
import json
import models
from transform_date import *
from accent_remover import *
logging.basicConfig(filename='logs/manuscritos.info.txt', level=logging.INFO)
logger = logging.getLogger(__name__)
# Add manuscripts for journals
def manus(filename):
sheet = pyexcel.get_sheet(
file_name=filename,
sheet_name='import',
name_columns_by_row=0)
sheet_json = sheet.to_records()
for rec in sheet_json:
print(rec['issn_scielo'])
query = models.Scielo.objects.filter(
issn_scielo=rec['issn_scielo'])
if len(query) == 1 and 'manuscritos' not in query[0]:
doc = query[0]
print(query[0]['issn_scielo'])
data = {'manuscritos': {}}
data['manuscritos'] = dict(rec)
if data:
doc.modify(**data)
# doc.save()
else:
if len(query) == 1:
doc = query[0]
data = {'manuscritos': ''}
data['manuscritos'] = json.loads(
query[0].to_json())['manuscritos']
for k, v in rec.items():
if k not in data['manuscritos']:
data['manuscritos'][k] = rec[k]
if data:
doc.update(**data)
def main():
# SciELO docs counts Network xlsx
filelist = [f for f in os.listdir(
'data/scielo/manuscritos/') if 'xlsx' in f]
filelist.sort()
for f in filelist:
print(f)
manus('data/scielo/manuscritos/' + f)
if __name__ == "__main__":
main()
| {
"content_hash": "2893b4bb871247b430d1f70bf46ce6ed",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 77,
"avg_line_length": 25.205882352941178,
"alnum_prop": 0.5414235705950992,
"repo_name": "scieloorg/journals-catalog",
"id": "f00f90e1c7f8a7649da69f6c8dfe0e6c32285d6c",
"size": "1730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jcatalog/transform/scielo_manuscripts_update.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "112941"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
from trainer_nn import TrainerNN, TrainerFF  # TrainerFF assumed to be exported by trainer_nn alongside TrainerNN
class RepslyFF(TrainerFF):
    # Placeholder body; the concrete network definition would go here.
    pass
| {
"content_hash": "8f282de5277708e9d5715901db6dcfb2",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 32,
"avg_line_length": 17.5,
"alnum_prop": 0.8095238095238095,
"repo_name": "wecliqued/deep_learning",
"id": "fe3ca4e28ff74290136fd466d787c76bf7688f7d",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repsly_challenge/repsly_nn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "26063"
},
{
"name": "Python",
"bytes": "91399"
}
],
"symlink_target": ""
} |
from mock import PropertyMock, MagicMock, patch
from workflow.steps.util.database import (StopIfRunning, StopSlaveIfRunning,
StopIfRunningAndVMUp)
from workflow.steps.tests.base import StepBaseTestCase
class StopIfRunningTestCase(StepBaseTestCase):
step_class = StopIfRunning
@patch('workflow.steps.util.database.StopIfRunning.host',
new=PropertyMock(return_value=None))
@patch('workflow.steps.util.database.StopIfRunning.is_up',
new=MagicMock(return_value=True))
def test_not_valid_when_dosent_have_host(self):
self.assertFalse(self.step.is_valid)
@patch('workflow.steps.util.database.StopIfRunning.host',
new=PropertyMock(return_value=MagicMock()))
@patch('workflow.steps.util.database.StopIfRunning.is_up',
new=MagicMock(return_value=False))
def test_have_host_but_is_down(self):
self.assertFalse(self.step.is_valid)
@patch('workflow.steps.util.database.StopIfRunning.host',
new=PropertyMock(return_value=MagicMock()))
@patch('workflow.steps.util.database.StopIfRunning.is_up',
new=MagicMock(return_value=True))
def test_is_valid(self):
self.assertTrue(self.step.is_valid)
class StopIfRunningAndVMUpTestCase(StepBaseTestCase):
step_class = StopIfRunningAndVMUp
@patch('workflow.steps.util.database.StopIfRunning.host',
new=PropertyMock(return_value=MagicMock()))
@patch('workflow.steps.util.database.StopIfRunning.is_up',
new=MagicMock(return_value=False))
@patch('workflow.steps.util.database.StopIfRunning.vm_is_up',
new=MagicMock(return_value=False))
def test_vm_down(self):
self.assertFalse(self.step.is_valid)
@patch('workflow.steps.util.database.StopIfRunning.host',
new=PropertyMock(return_value=MagicMock()))
@patch('workflow.steps.util.database.StopIfRunning.is_up',
new=MagicMock(return_value=False))
@patch('workflow.steps.util.database.StopIfRunning.vm_is_up',
new=MagicMock(return_value=True))
def test_vm_up_but_db_down(self):
self.assertFalse(self.step.is_valid)
@patch('workflow.steps.util.database.StopIfRunning.host',
new=PropertyMock(return_value=MagicMock()))
@patch('workflow.steps.util.database.StopIfRunning.is_up',
new=MagicMock(return_value=True))
@patch('workflow.steps.util.database.StopIfRunning.vm_is_up',
new=MagicMock(return_value=True))
def test_is_valid(self):
self.assertTrue(self.step.is_valid)
class StopSlaveIfRunningTestCase(StepBaseTestCase):
step_class = StopSlaveIfRunning
@patch('workflow.steps.util.database.StopSlaveIfRunning.is_up',
new=MagicMock(return_value=False))
def test_db_is_down(self):
self.assertFalse(self.step.is_valid)
@patch('workflow.steps.util.database.StopSlaveIfRunning.is_up',
new=MagicMock(return_value=True))
def test_db_is_up(self):
self.assertTrue(self.step.is_valid)
| {
"content_hash": "560ca67d123782c820ff80f4c72b0752",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 40.78666666666667,
"alnum_prop": 0.6953252696959791,
"repo_name": "globocom/database-as-a-service",
"id": "01f6546f58bb12d93ce549eed3b9dce82e446c19",
"size": "3059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/workflow/steps/tests/test_database_steps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
} |
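The test module above leans on mock's patch decorator with new=PropertyMock / new=MagicMock to swap out the host, is_up and vm_is_up lookups so that is_valid can be exercised without a real database or VM. A minimal, self-contained sketch of the same pattern (hypothetical Service class and names, using the stdlib unittest.mock instead of the external mock package):

import unittest
from unittest.mock import MagicMock, PropertyMock, patch

class Service(object):
    @property
    def host(self):
        raise RuntimeError("would hit the network in real code")

    def is_up(self):
        raise RuntimeError("would hit the database in real code")

    @property
    def is_valid(self):
        # what the patched tests above imply: valid only with a host and a running database
        return self.host is not None and self.is_up()

class ServiceTestCase(unittest.TestCase):
    @patch.object(Service, 'host', new=PropertyMock(return_value=None))
    @patch.object(Service, 'is_up', new=MagicMock(return_value=True))
    def test_not_valid_without_host(self):
        self.assertFalse(Service().is_valid)

    @patch.object(Service, 'host', new=PropertyMock(return_value=MagicMock()))
    @patch.object(Service, 'is_up', new=MagicMock(return_value=True))
    def test_valid_with_host_and_db_up(self):
        self.assertTrue(Service().is_valid)

if __name__ == '__main__':
    unittest.main()

Because new= is supplied, patch does not inject the mock as an extra argument, which is why the decorated test methods (here and in the dbaas tests above) take only self.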
"""
dyn_pages/feeds.py
Author: Kord Campbell
Date Added: 1/1/07
Builds RSS and ATOM feeds for the various pages on the site - cool, isn't it?
"""
## STD LIBS
from time import mktime
from xml.dom import minidom
from pprint import pformat
import datetime, xml
## OUR LIBS
from decorators import stack
import utils
## 3RD PARTY LIBS
from twisted.internet.defer import Deferred
from nevow import rend, inevow
class feeds(rend.Page):
addSlash = True
def __init__(self, username, app, log):
self.username = username
self.segments = []
self.app = app
self.log = log
self.domain = self.app.servers.httpserver._cfg_site_domain
def locateChild(self, ctx, segments):
self.segments = segments
return self, []
# called by twisted, this function parses the URL to figure out what type of feed to deliver, and what the globber should return
def renderHTTP(self, ctx):
self.request = inevow.IRequest(ctx)
self.title_append_item = ""
self.search_segment = ""
# prep the glob with default info
glob = {'include_comment_total': 1, 'order_by': 'date_uploaded', 'order_dir': 'desc', 'tag_list': True}
self.search_items = []
self.user_info = {}
# ensure we have the correct feed types passed
if self.segments[0] == "rss" or self.segments[0] == "atom" or self.segments[0] == "kml":
if len(self.segments) > 1 and self.segments[1]:
# parse out the search terms
self.search_items = self.segments[1].split(".")
if len(self.search_items) != 2:
# ummm, no - we have no idea what you are doing
return '<meta http-equiv="refresh" content="0;url=http://www.%s/not_found/">' % self.domain
else:
if self.search_items[0] == "SSQ":
# search query
glob['simple_search_query'] = self.search_items[1]
self.title_append_item = " - searching on '%s'" % self.search_items[1]
self.search_segment = self.segments[1]
elif self.search_items[0] == "TUN":
# tag query - we'll need to update this when we get advanced search (intersections/unions on tags)
glob['tag_union'] = [self.search_items[1]]
self.title_append_item = " - tagged with '%s'" % self.search_items[1]
self.search_segment = self.segments[1]
elif self.search_items[0] == "ALB":
# album query - we need to update this to support album names - but whatever - it's release time!!!
glob['album_id'] = int(self.search_items[1])
self.title_append_item = ""
self.search_segment = self.segments[1]
else:
# again, we have no idea what you are doing
return '<meta http-equiv="refresh" content="0;url=http://www.%s/not_found/">' % self.domain
else:
# unsupported feed type
return '<meta http-equiv="refresh" content="0;url=http://www.%s/not_found/">' % self.domain
def act(globber_result):
# grab some user info
d2 = self.app.api.users.get_user_id(self.username)
d2.addCallback(handle_user_id, globber_result)
return d2
def handle_user_id(result):
if result[0] != 0:
return "Bad username"
self.userid = result[1]
if self.userid:
d3 = self.app.api.users.get_info(self.userid, None)
else:
d3 = Deferred()
d3.callback((0, {}))
d3.addCallback(handle_user_info)
return d3
@stack
def handle_user_info(result):
if result[0] != 0:
return "NO USER INFO"
self.user_info = result[1]
limit = 20 # flickr does 20, so we do 20
offset = 0 # shouldn't ever be anything else
d4 = self.app.api.globber.get_images(self.userid, None, glob, limit, offset)
d4.addCallback(handle_globber_result)
return d4
@stack
def handle_globber_result(result):
if result[0] != 0:
return "NO PHOTOS"
self.photo_list = result[1]
if self.userid:
if len(self.search_items) and self.search_items[0] == "ALB":
self.log.warning("we have an album search item: %s" % self.search_items[1])
d5 = self.app.api.albums.get_info(None, int(self.search_items[1]))
else:
self.log.warning("no album search item")
d5 = Deferred()
d5.callback((0, None))
else:
d5 = Deferred()
d5.callback((0, None))
d5.addCallback(handle_album_info)
return d5
@stack
def handle_album_info(result):
if result[0] != 0:
return "NO ALBUMS"
self.album_info = result[1]
if not self.userid:
self.user_info['last_login'] = datetime.datetime.now()
self.user_info['date_created'] = datetime.datetime.now()
globber_arg = (0, self.photo_list)
album_arg = (0, self.album_info)
if self.segments[0] == "rss":
return self._build_rss2_feed(self.user_info, globber_arg, album_arg)
elif self.segments[0] == "atom":
return self._build_atom_feed(self.user_info, globber_arg, album_arg)
elif self.segments[0] == "kml":
return self._build_kml_feed(self.user_info, globber_arg, album_arg)
if self.username == "*ALL*":
d = Deferred()
d.callback((0, None))
else:
d = self.app.api.users.get_user_id(self.username)
d.addCallback(handle_user_id)
return d
def _format_date(self, dt):
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month-1],
dt.year, dt.hour, dt.minute, dt.second)
# function that josh had to write twice
@stack
def create_node(self, doc, label, attributes, children):
node = doc.createElement(label)
for key, value in attributes.items():
node.setAttribute(key, value)
if children:
for child in children:
node.appendChild(child)
return node
# call for building a rss feed
def _build_rss2_feed(self, user_result, globber_result, album_result):
if album_result[0] == 0 and album_result[1]:
album_info = album_result[1]
else:
album_info = {}
MEDIA_NS = 'http://search.yahoo.com/mrss/'
DUBLIN_NS = 'http://purl.org/dc/elements/1.1/'
if album_info:
## you have an album
self.title_append_item = " - %s" % album_info['title']
else:
self.title_append_item = ""
# Create an atom feed
doc = xml.dom.minidom.getDOMImplementation().createDocument('', 'rss', None)
doc.documentElement.setAttribute('version', "2.0")
doc.documentElement.setAttribute('xmlns:media', MEDIA_NS)
doc.documentElement.setAttribute('xmlns:dc', DUBLIN_NS)
# create channel entry
channel_node = self.create_node(doc, 'channel', {}, [])
channel_node.appendChild(doc.createTextNode("\n"))
if self.username != "*ALL*":
# define possessive username
pos_user = utils.possesive_username(self.username)
# Title
feed_title = "%s photos%s" % (pos_user, self.title_append_item)
else:
feed_title = "Everyone's photos on Zoto%s" % (self.title_append_item)
title_node = self.create_node(doc, 'title', {}, [doc.createTextNode(feed_title)])
channel_node.appendChild(title_node)
# feed links
if self.username != "*ALL*":
feed_link = "http://www.%s/site/#USR.%s::PAG.lightbox" % (self.domain, self.username)
else:
feed_link = "http://www.%s/#PAG.explore" % self.domain
link_node = self.create_node(doc, 'link', {}, [doc.createTextNode(feed_link)])
channel_node.appendChild(link_node)
# description
if self.username != "*ALL*":
feed_sub_title = "A feed of %s photos" % (pos_user)
else:
feed_sub_title = "A feed of everyone's photos on Zoto%s." % (self.title_append_item)
sub_title_node = self.create_node(doc, 'description', {}, [doc.createTextNode(feed_sub_title)])
channel_node.appendChild(sub_title_node)
# pubdate and lastbuilddate
if globber_result[0]:
feed_pubDate = self._format_date(datetime.datetime.now())
feed_lastBuildDate = self._format_date(datetime.datetime.now())
else:
feed_pubDate = self._format_date(datetime.datetime.now())
feed_lastBuildDate = self._format_date(datetime.datetime.now())
#feed_pubDate = self._format_date(globber_result[1]['date_uploaded'])
#feed_lastBuildDate = self._format_date(globber_result[1]['date_uploaded'])
# TODO - fix this to use GMT converted times
pub_date_node = self.create_node(doc, 'pubDate', {}, [doc.createTextNode(feed_pubDate)])
last_build_node = self.create_node(doc, 'lastBuildDate', {}, [doc.createTextNode(feed_lastBuildDate)])
channel_node.appendChild(pub_date_node)
channel_node.appendChild(last_build_node)
# Generator
PROGRAM_URI = "http://www.%s/" % self.domain
generator_node = self.create_node(doc, 'generator', {}, [doc.createTextNode(PROGRAM_URI)])
channel_node.appendChild(generator_node)
# image
if self.username != "*ALL*":
image = "http://www.%s/%s/avatar-small.jpg" % (self.domain, self.username)
else:
image = "http://www.%s/image/avatar-11.jpg" % (self.domain)
image_node = self.create_node(doc, 'image', {}, [])
image_url_node = self.create_node(doc, 'url', {}, [doc.createTextNode(image)])
image_title_node = self.create_node(doc, 'title', {}, [doc.createTextNode(feed_title)])
image_link_node = self.create_node(doc, 'link', {}, [doc.createTextNode(feed_link)])
image_node.appendChild(image_url_node)
image_node.appendChild(image_title_node)
image_node.appendChild(image_link_node)
channel_node.appendChild(image_node)
if not globber_result[0]:
# photos
for i in globber_result[1]:
item_node = doc.createElement('item')
# Title
item_title = i['title']
title_node = self.create_node(doc, 'title', {}, [doc.createTextNode(item_title)])
item_node.appendChild(title_node)
# feed links - alternate
item_link = "http://www.%s/site/#USR.%s::PAG.detail::%s" % (self.domain, i['owner_username'], i['media_id'])
link_node = self.create_node(doc, 'link', {}, [doc.createTextNode(item_link)])
item_node.appendChild(link_node)
# do the math for the rendered width or height (using a 240x240 render - size 28)
height_width_constraint = 240 # change this if you change the size we use (which we won't)
image_size_used = 28
if i['current_width'] and i['current_height']:
original_width = i['current_width']
original_height = i['current_height']
else:
original_width = i['original_width']
original_height = i['original_height']
if original_width > original_height:
# landscape
item_width = height_width_constraint
item_height = (height_width_constraint * (original_height*100/original_width))/100
elif original_width < original_height:
# portrait
item_height = height_width_constraint
item_width = (height_width_constraint * (original_width*100/original_height))/100
else:
# square
item_height = height_width_constraint
item_width = height_width_constraint
# description
image_link = "http://www.%s/%s/img/%s/%s.jpg" % (self.domain, i['owner_username'], image_size_used, i['media_id'])
description = '<p><a href="http://www.%s/%s">%s</a> posted a photo</p><p><a href="%s" title="%s"><img src="%s" width="%s" height="%s" alt="%s" style="border: 1px solid #ccc;"/></a></p>' % (self.domain, i['owner_username'], i['owner_username'], item_link, item_title, image_link, item_width, item_height, item_title)
description_node = self.create_node(doc, 'description', {}, [doc.createTextNode(description)])
item_node.appendChild(description_node)
# pubDate entry
pubDate = self._format_date(i['date_uploaded'])
pubdate_node = self.create_node(doc, 'pubDate', {}, [doc.createTextNode(pubDate)])
item_node.appendChild(pubdate_node)
# dc:date.Taken entry
if i['date']:
taken = datetime.datetime.utcfromtimestamp(mktime(i['date'].timetuple()))
else:
taken = datetime.datetime.utcfromtimestamp(mktime(i['date_uploaded'].timetuple()))
year = taken.year
month = taken.month
day = taken.day
hour = taken.hour
minute = taken.minute
second = taken.second
date_taken = "%s-%2.2d-%2.2dT%2.2d:%2.2d:%2.2dZ" % (year, int(month), day, hour, minute, second)
date_node = self.create_node(doc, 'dc:date.Taken', {}, [doc.createTextNode(date_taken)])
item_node.appendChild(date_node)
# author entry
author_entry = "[email protected] (%s)" % (i['owner_username'])
				author_node = self.create_node(doc, 'author', {}, [doc.createTextNode(author_entry)])
				item_node.appendChild(author_node)
# guid entry
item_guid = "tag:%s,%s:/site/#USR.%s::PAG.detail::%s" % (self.domain, i['date_uploaded'].year, i['owner_username'], i['media_id'])
guid_node = self.create_node(doc, 'guid', {'isPermaLink': "false"}, [doc.createTextNode(item_guid)])
item_node.appendChild(guid_node)
# media:content entry
item_media_content_url = "http://www.%s/img/35/%s.jpg" % (self.domain, i['media_id'])
item_media_content_type = "image/jpeg"
item_media_content_height = str(item_height)
item_media_content_width = str(item_width)
media_content_node = self.create_node(doc, 'media:content', {'url': item_media_content_url, 'type': item_media_content_type, 'height': item_media_content_height, 'width': item_media_content_width}, [])
item_node.appendChild(media_content_node)
# media:title entry
item_media_title = i['title']
media_title_node = self.create_node(doc, 'media:title', {}, [doc.createTextNode(item_media_title)])
item_node.appendChild(media_title_node)
# media:text entry
media_text_node = self.create_node(doc, 'media:text', {'type': "html"}, [doc.createTextNode(description)])
item_node.appendChild(media_text_node)
# again, we are using a predefined size, 16, which is 75x75 (minimal view size)
item_media_thumbnail_height = 75
item_media_thumbnail_width = 75
item_media_image_size_used = 16
# build the thumbnail for media:thumbnail (convert height and width to strings so the library can handle it)
item_media_thumbnail_url = "http://www.%s/%s/img/%s/%s.jpg" % (self.domain, i['owner_username'], item_media_image_size_used, i['media_id'])
height = str(item_media_thumbnail_height)
width = str(item_media_thumbnail_width)
media_thumbnail_node = self.create_node(doc, 'media:thumbnail', {'url': item_media_thumbnail_url, 'height': height, 'width': width}, [])
item_node.appendChild(media_thumbnail_node)
# media:credit entry
item_media_credit = i['owner_username']
item_media_role = "photographer"
media_credit_node = self.create_node(doc, 'media:credit', {'role': item_media_role}, [doc.createTextNode(item_media_credit)])
item_node.appendChild(media_credit_node)
# loop through the tags on the photo and build a category entry - how do we do this with multi-word tags? dunno.
tag_nodes = []
for j in i['tag_list']:
tag_nodes.append(doc.createTextNode(unicode(j, "utf-8")))
media_category_node = self.create_node(doc, 'media:category', {'scheme': "urn:zoto:tags"}, tag_nodes)
item_node.appendChild(media_category_node)
# attach item node - last thing in loop
channel_node.appendChild(item_node)
doc.documentElement.appendChild(channel_node)
# build the page and set the type
data = doc.toxml('utf-8')
self.request.setHeader('content-type', 'text/xml')
self.request.setHeader('content-length', str(len(data)))
return data
# call for building an atom feed
@stack
def _build_atom_feed(self, user_result, globber_result, album_result):
if album_result[0] == 0 and album_result[1]:
album_info = album_result[1]
else:
album_info = {}
ATOM_NS = 'http://www.w3.org/2005/Atom'
DUBLIN_NS = 'http://purl.org/dc/elements/1.1/'
# Create an atom feed
doc = xml.dom.minidom.getDOMImplementation().createDocument('', 'feed', None)
doc.documentElement.setAttribute('xmlns',ATOM_NS)
doc.documentElement.setAttribute('xmlns:dc',DUBLIN_NS)
if self.username != "*ALL*":
# define possessive username
pos_user = utils.possesive_username(self.username)
# Title
feed_title = "%s photos%s" % (pos_user, self.title_append_item)
else:
feed_title = "Everyone's photos on Zoto%s" % (self.title_append_item)
title_node = self.create_node(doc, 'title', {}, [doc.createTextNode(feed_title)])
doc.documentElement.appendChild(title_node)
# feed links - self
if self.username != "*ALL*":
feed_link = "http://www.%s/%s/feeds/atom/%s" % (self.domain, self.username, self.search_segment)
else:
feed_link = "http://www.%s/%s/feeds/atom/%s" % (self.domain, "community", self.search_segment)
link_node = self.create_node(doc, 'link', {'rel': 'self', 'href': feed_link}, [])
doc.documentElement.appendChild(link_node)
# feed links - alternate
if self.username != "*ALL*":
feed_link = "http://www.%s/%s/" % (self.domain, self.username)
else:
feed_link = "http://www.%s/%s/" % (self.domain, "community")
link_node = self.create_node(doc, 'link', {'rel': "alternate", 'type': "text/html", 'href': feed_link}, [])
doc.documentElement.appendChild(link_node)
# ID
if self.username != "*ALL*":
id_text = "tag:%s,%s:/%s/photos#%s" % (self.domain, user_result['date_created'].year, self.username, self.search_segment)
else:
id_text = "tag:%s,%s:/%s/photos#%s" % (self.domain, user_result['date_created'].year, "community", self.search_segment)
id_node = self.create_node(doc, 'id', {}, [doc.createTextNode(id_text)])
doc.documentElement.appendChild(id_node)
# user icon
if self.username != "*ALL*":
feed_icon = "http://www.%s/%s/avatar-small.jpg" % (self.domain, self.username)
else:
feed_icon = "http://www.%s/image/avatar-11.jpg" % (self.domain)
icon_node = self.create_node(doc, 'icon', {}, [doc.createTextNode(feed_icon)])
doc.documentElement.appendChild(icon_node)
# sub Title
if self.username != "*ALL*":
feed_sub_title = "A feed of %s photos" % (pos_user)
else:
feed_sub_title = "A feed of everyone's photos on Zoto%s." % (self.title_append_item)
sub_title_node = self.create_node(doc, 'subtitle', {}, [doc.createTextNode(feed_sub_title)])
doc.documentElement.appendChild(sub_title_node)
# updated
updated = datetime.datetime.utcfromtimestamp(mktime(user_result['last_login'].timetuple()))
year = updated.year
month = updated.month
day = updated.day
hour = updated.hour
minute = updated.minute
second = updated.second
feed_updated = "%s-%2.2d-%2.2dT%2.2d:%2.2d:%2.2dZ" % (year, int(month), day, hour, minute, second)
updated_node = self.create_node(doc, 'updated', {}, [doc.createTextNode(feed_updated)])
doc.documentElement.appendChild(updated_node)
# Generator
PROGRAM_NAME = "Zoto"
PROGRAM_URI = "http://www.%s/" % self.domain
VERSION = "3.0"
generator_node = self.create_node(doc, 'generator', {'version': VERSION, 'uri': PROGRAM_URI}, [doc.createTextNode(PROGRAM_NAME)])
doc.documentElement.appendChild(generator_node)
if not globber_result[0]:
# photos
for i in globber_result[1]:
entry_node = doc.createElement('entry')
# Title
item_title = i['title']
title_node = self.create_node(doc, 'title', {}, [doc.createTextNode(item_title)])
entry_node.appendChild(title_node)
# feed links - alternate
item_link = "http://www.%s/%s/detail/#%s" % (self.domain, i['owner_username'], i['media_id'])
link_node = self.create_node(doc, 'link', {'rel': "alternate", 'type': "text/html", 'href': item_link}, [])
entry_node.appendChild(link_node)
# id entry
item_id = "tag:%s,%s:/%s/detail/#%s" % (self.domain, i['date_uploaded'].year, i['owner_username'], i['media_id'])
id_node = self.create_node(doc, 'id', {}, [doc.createTextNode(item_id)])
entry_node.appendChild(id_node)
# published
published = datetime.datetime.utcfromtimestamp(mktime(i['date_uploaded'].timetuple()))
year = published.year
month = published.month
day = published.day
hour = published.hour
minute = published.minute
second = published.second
item_pub = "%s-%2.2d-%2.2dT%2.2d:%2.2d:%2.2dZ" % (year, int(month), day, hour, minute, second)
pub_node = self.create_node(doc, 'published', {}, [doc.createTextNode(item_pub)])
entry_node.appendChild(pub_node)
# updated (borrowing item_pub from published)
updated_node = self.create_node(doc, 'updated', {}, [doc.createTextNode(item_pub)])
entry_node.appendChild(updated_node)
# dc:date.Taken
if not i['date']:
i['date'] = i['date_uploaded']
taken = datetime.datetime.utcfromtimestamp(mktime(i['date'].timetuple()))
year = taken.year
month = taken.month
day = taken.day
hour = taken.hour
minute = taken.minute
second = taken.second
item_taken = "%s-%2.2d-%2.2dT%2.2d:%2.2d:%2.2dZ" % (year, int(month), day, hour, minute, second)
dc_taken_node = self.create_node(doc, 'dc:date.Taken', {}, [doc.createTextNode(item_taken)])
entry_node.appendChild(dc_taken_node)
# do the math for the rendered width or height (using a 240x240 render - size 28)
height_width_constraint = 240 # change this if you change the size we use (which we won't)
image_size_used = 28
if i['current_width'] and i['current_height']:
original_width = i['current_width']
original_height = i['current_height']
else:
original_width = i['original_width']
original_height = i['original_height']
if original_width > original_height:
# landscape
item_width = height_width_constraint
item_height = (height_width_constraint * (original_height*100/original_width))/100
elif original_width < original_height:
# portrait
item_height = height_width_constraint
item_width = (height_width_constraint * (original_width*100/original_height))/100
else:
# square
item_height = height_width_constraint
item_width = height_width_constraint
# content
image_link = "http://www.%s/%s/img/%s/%s.jpg" % (self.domain, i['owner_username'], image_size_used, i['media_id'])
content = '<p><a href="http://www.%s/%s">%s</a> posted a photo</p><p><a href="%s" title="%s"><img src="%s" width="%s" height="%s" alt="%s" style="border: 1px solid #ccc;"/></a></p>' % (self.domain, i['owner_username'], i['owner_username'], item_link, item_title, image_link, item_width, item_height, item_title)
content_node = self.create_node(doc, 'content', {'type': "html"}, [doc.createTextNode(content)])
entry_node.appendChild(content_node)
# author
author_node = self.create_node(doc, 'author', {}, [])
name_node = self.create_node(doc, 'name', {}, [doc.createTextNode(i['owner_username'])])
author_node.appendChild(name_node)
uri_link = "http://www.%s/%s/" % (self.domain, i['owner_username'])
uri_node = self.create_node(doc, 'uri', {}, [doc.createTextNode(uri_link)])
author_node.appendChild(uri_node)
entry_node.appendChild(author_node)
# link enclosure
enclosure_node = self.create_node(doc, 'link', {'type': "image/jpeg", 'rel': "enclosure", 'href': image_link}, [])
entry_node.appendChild(enclosure_node)
# loop through the tags on the photo
for j in i['tag_list']:
tag_name = j
scheme = "http://www.%s/%s/tags/" % (self.domain, i['owner_username'])
tag_node = self.create_node(doc, 'category', {'term': unicode(tag_name, "utf-8"), 'scheme': scheme}, [])
entry_node.appendChild(tag_node)
# tack on the entry_node to the main document
doc.documentElement.appendChild(entry_node)
# build the page and set the type
#data = doc.toprettyxml()
data = doc.toxml('utf-8')
self.request.setHeader('content-type', 'text/xml')
self.request.setHeader('content-length', str(len(data)))
return data
# to be done, kml feeds for google earth exports
def _build_kml_feed(self, user_result, globber_result, album_info):
return "kml feed"
| {
"content_hash": "52a85458ded107df6aba3027ab4f3726",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 319,
"avg_line_length": 38.24595469255664,
"alnum_prop": 0.6599255373159587,
"repo_name": "kordless/zoto-server",
"id": "a09a80f9a855bee397ca025659b09586553f4903",
"size": "23636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aztk/web/feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1489011"
},
{
"name": "PHP",
"bytes": "15394"
},
{
"name": "Python",
"bytes": "905967"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
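feeds.py builds its RSS and Atom documents by hand with xml.dom.minidom: a create_node helper that sets attributes and appends children, nodes hung off a channel (or feed) element, and toxml('utf-8') for the final output. A standalone sketch of that pattern (titles, links and the guid below are illustrative, not Zoto's real data):

import xml.dom.minidom

def create_node(doc, label, attributes, children):
    # same shape as feeds.create_node: element + attributes + child nodes
    node = doc.createElement(label)
    for key, value in attributes.items():
        node.setAttribute(key, value)
    for child in children:
        node.appendChild(child)
    return node

doc = xml.dom.minidom.getDOMImplementation().createDocument('', 'rss', None)
doc.documentElement.setAttribute('version', '2.0')

channel = create_node(doc, 'channel', {}, [
    create_node(doc, 'title', {}, [doc.createTextNode('Example photos')]),
    create_node(doc, 'link', {}, [doc.createTextNode('http://www.example.com/')]),
])
item = create_node(doc, 'item', {}, [
    create_node(doc, 'title', {}, [doc.createTextNode('A photo')]),
    create_node(doc, 'guid', {'isPermaLink': 'false'},
                [doc.createTextNode('tag:example.com,2007:photo-1')]),
])
channel.appendChild(item)
doc.documentElement.appendChild(channel)

print(doc.toxml('utf-8'))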
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NoteText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note_text', models.CharField(max_length=10000)),
('modified_date', models.DateTimeField(verbose_name='date modified')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag_title', models.CharField(max_length=50)),
],
),
]
| {
"content_hash": "e55048bb96197d0217ef0accb6995a0c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 29.517241379310345,
"alnum_prop": 0.5549065420560748,
"repo_name": "kokopelli314/hydronote",
"id": "0da60563fa016387bd069d113fff085a4bc42e49",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hydronote/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15412"
},
{
"name": "HTML",
"bytes": "9931"
},
{
"name": "JavaScript",
"bytes": "18470"
},
{
"name": "Python",
"bytes": "18901"
}
],
"symlink_target": ""
} |
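For reference, the initial migration above corresponds to two very small models. A hedged reconstruction of what the app's models.py likely contains (an assumption inferred from the CreateModel operations, not the project's actual file):

from django.db import models

class NoteText(models.Model):
    note_text = models.CharField(max_length=10000)
    modified_date = models.DateTimeField('date modified')

class Tag(models.Model):
    tag_title = models.CharField(max_length=50)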
import sys
import pandas as pd
import matplotlib.pyplot as plt
def main():
# Assign variables
input_file = sys.argv[1]
output_file = sys.argv[2]
# Load the data
data = pd.read_csv(input_file, delimiter=',')
# Apply the plot function to the loaded data
plt.plot(data)
# Save the plot
plt.savefig(output_file)
# Show the plot
plt.show()
if __name__ == '__main__':
    main()
| {
"content_hash": "5d8d3a151b40d562300841576297d3e2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 46,
"avg_line_length": 14.153846153846153,
"alnum_prop": 0.6793478260869565,
"repo_name": "jonahgolden/assignment_9",
"id": "9c1f44be800fed56657cac09e97cf65f70f9887f",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Load_Plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "487"
}
],
"symlink_target": ""
} |
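Load_Plot.py takes its input and output paths as positional arguments (sys.argv[1] and sys.argv[2]), so a typical invocation looks like the line below; the file names are only illustrative:

python Load_Plot.py measurements.csv plot.png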
""" Cisco_IOS_XR_asr9k_netflow_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR asr9k\-netflow package operational data.
This module contains definitions
for the following management objects\:
net\-flow\: NetFlow operational data
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class NfmgrFemEdmExpVerEnum(Enum):
"""
NfmgrFemEdmExpVerEnum
Netflow export version
.. data:: V9 = 0
Version 9 export format
.. data:: IP_FIX = 1
IPFIX export format
"""
V9 = 0
IP_FIX = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NfmgrFemEdmExpVerEnum']
class NfmgrFemEdmTransProtoEnum(Enum):
"""
NfmgrFemEdmTransProtoEnum
Netflow export transport protocol
.. data:: UNSPECIFIED = 0
Unspecified transport protocol
.. data:: UDP = 1
UDP transport protocol
"""
UNSPECIFIED = 0
UDP = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NfmgrFemEdmTransProtoEnum']
class UdpAddressFamilyEnum(Enum):
"""
UdpAddressFamilyEnum
Address Family Type
.. data:: IPV4 = 2
IPv4
.. data:: IPV6 = 10
IPv6
"""
IPV4 = 2
IPV6 = 10
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['UdpAddressFamilyEnum']
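# Usage sketch (an assumption about the surrounding tooling, not part of the generated
# bindings themselves): the NetFlow classes below are normally populated by reading
# operational data from an IOS XR device through ydk-py's CRUD service, roughly:
#
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#
#   provider = NetconfServiceProvider(address='192.0.2.1', port=830,
#                                     username='admin', password='admin', protocol='ssh')
#   crud = CRUDService()
#   netflow = crud.read(provider, NetFlow())   # returns a populated NetFlow instance
#
# The address and credentials above are placeholders.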
class NetFlow(object):
"""
NetFlow operational data
.. attribute:: configuration
NetFlow configuration information
**type**\: :py:class:`Configuration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration>`
.. attribute:: statistics
Node\-specific NetFlow statistics information
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.configuration = NetFlow.Configuration()
self.configuration.parent = self
self.statistics = NetFlow.Statistics()
self.statistics.parent = self
class Configuration(object):
"""
NetFlow configuration information
.. attribute:: flow_exporter_maps
Flow exporter map configuration information
**type**\: :py:class:`FlowExporterMaps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowExporterMaps>`
.. attribute:: flow_monitor_maps
Flow monitor map configuration information
**type**\: :py:class:`FlowMonitorMaps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowMonitorMaps>`
.. attribute:: flow_sampler_maps
Flow sampler map configuration information
**type**\: :py:class:`FlowSamplerMaps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowSamplerMaps>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.flow_exporter_maps = NetFlow.Configuration.FlowExporterMaps()
self.flow_exporter_maps.parent = self
self.flow_monitor_maps = NetFlow.Configuration.FlowMonitorMaps()
self.flow_monitor_maps.parent = self
self.flow_sampler_maps = NetFlow.Configuration.FlowSamplerMaps()
self.flow_sampler_maps.parent = self
class FlowExporterMaps(object):
"""
Flow exporter map configuration information
.. attribute:: flow_exporter_map
Flow exporter map information
**type**\: list of :py:class:`FlowExporterMap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowExporterMaps.FlowExporterMap>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.flow_exporter_map = YList()
self.flow_exporter_map.parent = self
self.flow_exporter_map.name = 'flow_exporter_map'
class FlowExporterMap(object):
"""
Flow exporter map information
.. attribute:: exporter_name <key>
Exporter name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: collector
Export collector array
**type**\: list of :py:class:`Collector <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Collector>`
.. attribute:: id
Unique ID in the global flow exporter ID space
**type**\: int
**range:** 0..4294967295
.. attribute:: name
Name of the flow exporter map
**type**\: str
.. attribute:: version
Export version data
**type**\: :py:class:`Version <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.exporter_name = None
self.collector = YList()
self.collector.parent = self
self.collector.name = 'collector'
self.id = None
self.name = None
self.version = NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version()
self.version.parent = self
class Version(object):
"""
Export version data
.. attribute:: ipfix
ipfix
**type**\: :py:class:`Ipfix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version.Ipfix>`
.. attribute:: version
version
**type**\: :py:class:`NfmgrFemEdmExpVerEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NfmgrFemEdmExpVerEnum>`
.. attribute:: version9
version9
**type**\: :py:class:`Version9 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version.Version9>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.ipfix = NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version.Ipfix()
self.ipfix.parent = self
self.version = None
self.version9 = NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version.Version9()
self.version9.parent = self
class Version9(object):
"""
version9
.. attribute:: common_template_export_timeout
Common template export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: data_template_export_timeout
Data template export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: interface_table_export_timeout
Interface table export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: options_template_export_timeout
Options template export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: sampler_table_export_timeout
Sampler table export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_table_export_timeout
VRF table export timeout in seconds
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.common_template_export_timeout = None
self.data_template_export_timeout = None
self.interface_table_export_timeout = None
self.options_template_export_timeout = None
self.sampler_table_export_timeout = None
self.vrf_table_export_timeout = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:version9'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.common_template_export_timeout is not None:
return True
if self.data_template_export_timeout is not None:
return True
if self.interface_table_export_timeout is not None:
return True
if self.options_template_export_timeout is not None:
return True
if self.sampler_table_export_timeout is not None:
return True
if self.vrf_table_export_timeout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version.Version9']['meta_info']
class Ipfix(object):
"""
ipfix
.. attribute:: common_template_export_timeout
Common template export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: data_template_export_timeout
Data template export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: interface_table_export_timeout
Interface table export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: options_template_export_timeout
Options template export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: sampler_table_export_timeout
Sampler table export timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_table_export_timeout
VRF table export timeout in seconds
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.common_template_export_timeout = None
self.data_template_export_timeout = None
self.interface_table_export_timeout = None
self.options_template_export_timeout = None
self.sampler_table_export_timeout = None
self.vrf_table_export_timeout = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:ipfix'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.common_template_export_timeout is not None:
return True
if self.data_template_export_timeout is not None:
return True
if self.interface_table_export_timeout is not None:
return True
if self.options_template_export_timeout is not None:
return True
if self.sampler_table_export_timeout is not None:
return True
if self.vrf_table_export_timeout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version.Ipfix']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:version'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.ipfix is not None and self.ipfix._has_data():
return True
if self.version is not None:
return True
if self.version9 is not None and self.version9._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Version']['meta_info']
class Collector(object):
"""
Export collector array
.. attribute:: destination_address
Destination IPv4 address in AAA.BBB.CCC.DDD format
**type**\: str
.. attribute:: destination_port
Transport destination port number
**type**\: int
**range:** 0..65535
.. attribute:: dscp
DSCP
**type**\: int
**range:** 0..255
.. attribute:: source_address
Source IPv4 address in AAA.BBB.CCC.DDD format
**type**\: str
.. attribute:: source_interface
Source interface name
**type**\: str
.. attribute:: transport_protocol
Transport protocol
**type**\: :py:class:`NfmgrFemEdmTransProtoEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NfmgrFemEdmTransProtoEnum>`
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.destination_address = None
self.destination_port = None
self.dscp = None
self.source_address = None
self.source_interface = None
self.transport_protocol = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:collector'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.destination_address is not None:
return True
if self.destination_port is not None:
return True
if self.dscp is not None:
return True
if self.source_address is not None:
return True
if self.source_interface is not None:
return True
if self.transport_protocol is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowExporterMaps.FlowExporterMap.Collector']['meta_info']
@property
def _common_path(self):
if self.exporter_name is None:
raise YPYModelError('Key property exporter_name is None')
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:configuration/Cisco-IOS-XR-asr9k-netflow-oper:flow-exporter-maps/Cisco-IOS-XR-asr9k-netflow-oper:flow-exporter-map[Cisco-IOS-XR-asr9k-netflow-oper:exporter-name = ' + str(self.exporter_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.exporter_name is not None:
return True
if self.collector is not None:
for child_ref in self.collector:
if child_ref._has_data():
return True
if self.id is not None:
return True
if self.name is not None:
return True
if self.version is not None and self.version._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowExporterMaps.FlowExporterMap']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:configuration/Cisco-IOS-XR-asr9k-netflow-oper:flow-exporter-maps'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.flow_exporter_map is not None:
for child_ref in self.flow_exporter_map:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowExporterMaps']['meta_info']
class FlowMonitorMaps(object):
"""
Flow monitor map configuration information
.. attribute:: flow_monitor_map
Flow monitor map information
**type**\: list of :py:class:`FlowMonitorMap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowMonitorMaps.FlowMonitorMap>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.flow_monitor_map = YList()
self.flow_monitor_map.parent = self
self.flow_monitor_map.name = 'flow_monitor_map'
class FlowMonitorMap(object):
"""
Flow monitor map information
.. attribute:: monitor_name <key>
Monitor name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: cache_active_timeout
Cache active flow timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: cache_aging_mode
Aging mode for flow cache
**type**\: str
.. attribute:: cache_inactive_timeout
Cache inactive flow timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: cache_max_entry
Max num of entries in flow cache
**type**\: int
**range:** 0..4294967295
.. attribute:: cache_timeout_rate_limit
Maximum number of entries to age each second
**type**\: int
**range:** 0..4294967295
.. attribute:: cache_update_timeout
Cache update timeout in seconds
**type**\: int
**range:** 0..4294967295
.. attribute:: exporter
Name of the flow exporters used by the flow monitor
**type**\: list of :py:class:`Exporter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowMonitorMaps.FlowMonitorMap.Exporter>`
.. attribute:: id
Unique ID in the global flow monitor ID space
**type**\: int
**range:** 0..4294967295
.. attribute:: name
Name of the flow monitor map
**type**\: str
.. attribute:: number_of_labels
Number of MPLS labels in key
**type**\: int
**range:** 0..4294967295
.. attribute:: options
Options applied to the flow monitor
**type**\: int
**range:** 0..4294967295
.. attribute:: record_map
Name of the flow record map
**type**\: str
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.monitor_name = None
self.cache_active_timeout = None
self.cache_aging_mode = None
self.cache_inactive_timeout = None
self.cache_max_entry = None
self.cache_timeout_rate_limit = None
self.cache_update_timeout = None
self.exporter = YList()
self.exporter.parent = self
self.exporter.name = 'exporter'
self.id = None
self.name = None
self.number_of_labels = None
self.options = None
self.record_map = None
class Exporter(object):
"""
Name of the flow exporters used by the flow
monitor
.. attribute:: name
Exporter name
**type**\: str
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:exporter'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowMonitorMaps.FlowMonitorMap.Exporter']['meta_info']
@property
def _common_path(self):
if self.monitor_name is None:
raise YPYModelError('Key property monitor_name is None')
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:configuration/Cisco-IOS-XR-asr9k-netflow-oper:flow-monitor-maps/Cisco-IOS-XR-asr9k-netflow-oper:flow-monitor-map[Cisco-IOS-XR-asr9k-netflow-oper:monitor-name = ' + str(self.monitor_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.monitor_name is not None:
return True
if self.cache_active_timeout is not None:
return True
if self.cache_aging_mode is not None:
return True
if self.cache_inactive_timeout is not None:
return True
if self.cache_max_entry is not None:
return True
if self.cache_timeout_rate_limit is not None:
return True
if self.cache_update_timeout is not None:
return True
if self.exporter is not None:
for child_ref in self.exporter:
if child_ref._has_data():
return True
if self.id is not None:
return True
if self.name is not None:
return True
if self.number_of_labels is not None:
return True
if self.options is not None:
return True
if self.record_map is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowMonitorMaps.FlowMonitorMap']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:configuration/Cisco-IOS-XR-asr9k-netflow-oper:flow-monitor-maps'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.flow_monitor_map is not None:
for child_ref in self.flow_monitor_map:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowMonitorMaps']['meta_info']
class FlowSamplerMaps(object):
"""
Flow sampler map configuration information
.. attribute:: flow_sampler_map
Flow sampler map information
**type**\: list of :py:class:`FlowSamplerMap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Configuration.FlowSamplerMaps.FlowSamplerMap>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.flow_sampler_map = YList()
self.flow_sampler_map.parent = self
self.flow_sampler_map.name = 'flow_sampler_map'
class FlowSamplerMap(object):
"""
Flow sampler map information
.. attribute:: sampler_name <key>
Sampler name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: id
Unique ID in the global flow sampler ID space
**type**\: int
**range:** 0..4294967295
.. attribute:: name
Name of the flow sampler map
**type**\: str
.. attribute:: sampling_mode
Sampling mode and parameters
**type**\: str
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.sampler_name = None
self.id = None
self.name = None
self.sampling_mode = None
@property
def _common_path(self):
if self.sampler_name is None:
raise YPYModelError('Key property sampler_name is None')
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:configuration/Cisco-IOS-XR-asr9k-netflow-oper:flow-sampler-maps/Cisco-IOS-XR-asr9k-netflow-oper:flow-sampler-map[Cisco-IOS-XR-asr9k-netflow-oper:sampler-name = ' + str(self.sampler_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.sampler_name is not None:
return True
if self.id is not None:
return True
if self.name is not None:
return True
if self.sampling_mode is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowSamplerMaps.FlowSamplerMap']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:configuration/Cisco-IOS-XR-asr9k-netflow-oper:flow-sampler-maps'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.flow_sampler_map is not None:
for child_ref in self.flow_sampler_map:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration.FlowSamplerMaps']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:configuration'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.flow_exporter_maps is not None and self.flow_exporter_maps._has_data():
return True
if self.flow_monitor_maps is not None and self.flow_monitor_maps._has_data():
return True
if self.flow_sampler_maps is not None and self.flow_sampler_maps._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Configuration']['meta_info']
class Statistics(object):
"""
Node\-specific NetFlow statistics information
.. attribute:: statistic
NetFlow statistics information for a particular node
**type**\: list of :py:class:`Statistic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.statistic = YList()
self.statistic.parent = self
self.statistic.name = 'statistic'
class Statistic(object):
"""
NetFlow statistics information for a particular
node
.. attribute:: node <key>
Node location
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: producer
NetFlow producer statistics
**type**\: :py:class:`Producer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Producer>`
.. attribute:: server
NetFlow server statistics
**type**\: :py:class:`Server <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node = None
self.producer = NetFlow.Statistics.Statistic.Producer()
self.producer.parent = self
self.server = NetFlow.Statistics.Statistic.Server()
self.server.parent = self
class Producer(object):
"""
NetFlow producer statistics
.. attribute:: statistics
Statistics information
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Producer.Statistics>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.statistics = NetFlow.Statistics.Statistic.Producer.Statistics()
self.statistics.parent = self
class Statistics(object):
"""
Statistics information
.. attribute:: drops_no_space
Drops (no space)
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: drops_others
Drops (others)
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: flow_packet_counts
Number of Rxed Flow Packets
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: ipv4_egress_flows
IPv4 egress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: ipv4_ingress_flows
IPv4 ingress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: ipv6_egress_flows
IPv6 egress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: ipv6_ingress_flows
IPv6 ingress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_cleared
Last time Statistics cleared in 'Mon Jan 1 12\:00 \:00 2xxx' format
**type**\: str
.. attribute:: mpls_egress_flows
MPLS egress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: mpls_ingress_flows
MPLS ingress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: spp_rx_counts
Number of Rxed SPP Packets
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: unknown_egress_flows
Unknown egress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: unknown_ingress_flows
Unknown ingress flows
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: waiting_servers
Number of waiting servers
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.drops_no_space = None
self.drops_others = None
self.flow_packet_counts = None
self.ipv4_egress_flows = None
self.ipv4_ingress_flows = None
self.ipv6_egress_flows = None
self.ipv6_ingress_flows = None
self.last_cleared = None
self.mpls_egress_flows = None
self.mpls_ingress_flows = None
self.spp_rx_counts = None
self.unknown_egress_flows = None
self.unknown_ingress_flows = None
self.waiting_servers = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:statistics'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.drops_no_space is not None:
return True
if self.drops_others is not None:
return True
if self.flow_packet_counts is not None:
return True
if self.ipv4_egress_flows is not None:
return True
if self.ipv4_ingress_flows is not None:
return True
if self.ipv6_egress_flows is not None:
return True
if self.ipv6_ingress_flows is not None:
return True
if self.last_cleared is not None:
return True
if self.mpls_egress_flows is not None:
return True
if self.mpls_ingress_flows is not None:
return True
if self.spp_rx_counts is not None:
return True
if self.unknown_egress_flows is not None:
return True
if self.unknown_ingress_flows is not None:
return True
if self.waiting_servers is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Producer.Statistics']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:producer'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.statistics is not None and self.statistics._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Producer']['meta_info']
class Server(object):
"""
NetFlow server statistics
.. attribute:: flow_exporters
Flow exporter information
**type**\: :py:class:`FlowExporters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server.FlowExporters>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.flow_exporters = NetFlow.Statistics.Statistic.Server.FlowExporters()
self.flow_exporters.parent = self
class FlowExporters(object):
"""
Flow exporter information
.. attribute:: flow_exporter
Exporter information
**type**\: list of :py:class:`FlowExporter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.flow_exporter = YList()
self.flow_exporter.parent = self
self.flow_exporter.name = 'flow_exporter'
class FlowExporter(object):
"""
Exporter information
.. attribute:: exporter_name <key>
Exporter name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: exporter
Statistics information for the exporter
**type**\: :py:class:`Exporter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.exporter_name = None
self.exporter = NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter()
self.exporter.parent = self
class Exporter(object):
"""
Statistics information for the exporter
.. attribute:: statistic
Array of flow exporters
**type**\: list of :py:class:`Statistic <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic>`
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.statistic = YList()
self.statistic.parent = self
self.statistic.name = 'statistic'
class Statistic(object):
"""
Array of flow exporters
.. attribute:: collector
Statistics of all collectors
**type**\: list of :py:class:`Collector <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector>`
.. attribute:: memory_usage
Memory usage
**type**\: int
**range:** 0..4294967295
.. attribute:: name
Exporter name
**type**\: str
.. attribute:: used_by_flow_monitor
List of flow monitors that use the exporter
**type**\: list of str
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.collector = YList()
self.collector.parent = self
self.collector.name = 'collector'
self.memory_usage = None
self.name = None
self.used_by_flow_monitor = YLeafList()
self.used_by_flow_monitor.parent = self
self.used_by_flow_monitor.name = 'used_by_flow_monitor'
class Collector(object):
"""
Statistics of all collectors
.. attribute:: bytes_dropped
Bytes dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: bytes_sent
Bytes sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: destination_address
Destination address
**type**\: :py:class:`DestinationAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector.DestinationAddress>`
.. attribute:: destination_port
Destination port number
**type**\: int
**range:** 0..65535
.. attribute:: exporter_state
Exporter state
**type**\: str
.. attribute:: flow_bytes_dropped
Flow bytes dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: flow_bytes_sent
Flow bytes sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: flows_dropped
Flows dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: flows_sent
Flows sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_hour_bytes_sent
Total bytes exported over the last one hour
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_hour_flows_sent
	Total flows exported over the last one hour
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_hour_packest_sent
Total packets exported over the last one hour
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_minute_bytes_sent
Total bytes exported over the last one minute
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_minute_flows_sent
Total flows exported over the last one minute
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_minute_packets
Total packets exported over the last one minute
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_second_bytes_sent
Total bytes exported over the last one second
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_second_flows_sent
Total flows exported over the last one second
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: last_second_packets_sent
Total packets exported over the last one second
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_data_bytes_dropped
	Option data bytes dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_data_bytes_sent
Option data bytes sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_data_dropped
Option data dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_data_sent
Option data sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_template_bytes_dropped
Option template bytes dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_template_bytes_sent
Option template bytes sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_templates_dropped
Option templates dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: option_templates_sent
Option templates sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: packets_dropped
Packets dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: packets_sent
Packets sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: souce_port
Source port number
**type**\: int
**range:** 0..65535
.. attribute:: source_address
Source address
**type**\: :py:class:`SourceAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector.SourceAddress>`
.. attribute:: template_bytes_dropped
Template bytes dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: template_bytes_sent
Template bytes sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: templates_dropped
Templates dropped
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: templates_sent
Templates sent
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: transport_protocol
Transport protocol
**type**\: str
.. attribute:: vrf_name
VRF Name
**type**\: str
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.bytes_dropped = None
self.bytes_sent = None
self.destination_address = NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector.DestinationAddress()
self.destination_address.parent = self
self.destination_port = None
self.exporter_state = None
self.flow_bytes_dropped = None
self.flow_bytes_sent = None
self.flows_dropped = None
self.flows_sent = None
self.last_hour_bytes_sent = None
self.last_hour_flows_sent = None
self.last_hour_packest_sent = None
self.last_minute_bytes_sent = None
self.last_minute_flows_sent = None
self.last_minute_packets = None
self.last_second_bytes_sent = None
self.last_second_flows_sent = None
self.last_second_packets_sent = None
self.option_data_bytes_dropped = None
self.option_data_bytes_sent = None
self.option_data_dropped = None
self.option_data_sent = None
self.option_template_bytes_dropped = None
self.option_template_bytes_sent = None
self.option_templates_dropped = None
self.option_templates_sent = None
self.packets_dropped = None
self.packets_sent = None
self.souce_port = None
self.source_address = NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector.SourceAddress()
self.source_address.parent = self
self.template_bytes_dropped = None
self.template_bytes_sent = None
self.templates_dropped = None
self.templates_sent = None
self.transport_protocol = None
self.vrf_name = None
class DestinationAddress(object):
"""
Destination address
.. attribute:: af_name
AFName
**type**\: :py:class:`UdpAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.UdpAddressFamilyEnum>`
.. attribute:: ipv4_address
IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6_address
IPv6 Address
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af_name = None
self.ipv4_address = None
self.ipv6_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:destination-address'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af_name is not None:
return True
if self.ipv4_address is not None:
return True
if self.ipv6_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector.DestinationAddress']['meta_info']
class SourceAddress(object):
"""
Source address
.. attribute:: af_name
AFName
**type**\: :py:class:`UdpAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_netflow_oper.UdpAddressFamilyEnum>`
.. attribute:: ipv4_address
IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6_address
IPv6 Address
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'asr9k-netflow-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af_name = None
self.ipv4_address = None
self.ipv6_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:source-address'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.af_name is not None:
return True
if self.ipv4_address is not None:
return True
if self.ipv6_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector.SourceAddress']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:collector'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.bytes_dropped is not None:
return True
if self.bytes_sent is not None:
return True
if self.destination_address is not None and self.destination_address._has_data():
return True
if self.destination_port is not None:
return True
if self.exporter_state is not None:
return True
if self.flow_bytes_dropped is not None:
return True
if self.flow_bytes_sent is not None:
return True
if self.flows_dropped is not None:
return True
if self.flows_sent is not None:
return True
if self.last_hour_bytes_sent is not None:
return True
if self.last_hour_flows_sent is not None:
return True
if self.last_hour_packest_sent is not None:
return True
if self.last_minute_bytes_sent is not None:
return True
if self.last_minute_flows_sent is not None:
return True
if self.last_minute_packets is not None:
return True
if self.last_second_bytes_sent is not None:
return True
if self.last_second_flows_sent is not None:
return True
if self.last_second_packets_sent is not None:
return True
if self.option_data_bytes_dropped is not None:
return True
if self.option_data_bytes_sent is not None:
return True
if self.option_data_dropped is not None:
return True
if self.option_data_sent is not None:
return True
if self.option_template_bytes_dropped is not None:
return True
if self.option_template_bytes_sent is not None:
return True
if self.option_templates_dropped is not None:
return True
if self.option_templates_sent is not None:
return True
if self.packets_dropped is not None:
return True
if self.packets_sent is not None:
return True
if self.souce_port is not None:
return True
if self.source_address is not None and self.source_address._has_data():
return True
if self.template_bytes_dropped is not None:
return True
if self.template_bytes_sent is not None:
return True
if self.templates_dropped is not None:
return True
if self.templates_sent is not None:
return True
if self.transport_protocol is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic.Collector']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:statistic'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.collector is not None:
for child_ref in self.collector:
if child_ref._has_data():
return True
if self.memory_usage is not None:
return True
if self.name is not None:
return True
if self.used_by_flow_monitor is not None:
for child in self.used_by_flow_monitor:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter.Statistic']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:exporter'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.statistic is not None:
for child_ref in self.statistic:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter.Exporter']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.exporter_name is None:
raise YPYModelError('Key property exporter_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:flow-exporter[Cisco-IOS-XR-asr9k-netflow-oper:exporter-name = ' + str(self.exporter_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.exporter_name is not None:
return True
if self.exporter is not None and self.exporter._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server.FlowExporters.FlowExporter']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:flow-exporters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.flow_exporter is not None:
for child_ref in self.flow_exporter:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server.FlowExporters']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-netflow-oper:server'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.flow_exporters is not None and self.flow_exporters._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic.Server']['meta_info']
@property
def _common_path(self):
if self.node is None:
raise YPYModelError('Key property node is None')
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:statistics/Cisco-IOS-XR-asr9k-netflow-oper:statistic[Cisco-IOS-XR-asr9k-netflow-oper:node = ' + str(self.node) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node is not None:
return True
if self.producer is not None and self.producer._has_data():
return True
if self.server is not None and self.server._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics.Statistic']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow/Cisco-IOS-XR-asr9k-netflow-oper:statistics'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.statistic is not None:
for child_ref in self.statistic:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow.Statistics']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-netflow-oper:net-flow'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.configuration is not None and self.configuration._has_data():
return True
if self.statistics is not None and self.statistics._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_netflow_oper as meta
return meta._meta_table['NetFlow']['meta_info']
| {
"content_hash": "f70aca7eb2f70b8d8ee73eb65c0e5514",
"timestamp": "",
"source": "github",
"line_count": 2317,
"max_line_length": 298,
"avg_line_length": 42.01337936987484,
"alnum_prop": 0.38417997842724333,
"repo_name": "abhikeshav/ydk-py",
"id": "9c09a0e5ed913a456084bedf69d57132cdbd2e83",
"size": "97345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_asr9k_netflow_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117934"
}
],
"symlink_target": ""
} |
import traceback
import numpy as np
from ..external.qt.QtCore import Qt, Signal
from ..external.qt.QtGui import (QMainWindow, QWidget,
QHBoxLayout, QTabWidget,
QComboBox, QFormLayout, QPushButton,
QAction, QTextEdit, QFont, QDialog,
QDialogButtonBox, QLineEdit,
QDoubleValidator, QCheckBox, QGridLayout,
QLabel, QMdiSubWindow)
from ..clients.profile_viewer import ProfileViewer
from .widgets.mpl_widget import MplWidget
from .mouse_mode import SpectrumExtractorMode
from ..core.callback_property import add_callback, ignore_callback
from ..core.util import Pointer
from ..core import Subset
from ..core.exceptions import IncompatibleAttribute
from .glue_toolbar import GlueToolbar
from .qtutil import load_ui, nonpartial, Worker
from .widget_properties import CurrentComboProperty
from ..core.aggregate import Aggregate
from .mime import LAYERS_MIME_TYPE
from .simpleforms import build_form_item
from ..config import fit_plugin
class Extractor(object):
# Warning:
# Coordinate conversion is not well-defined if pix2world is not
# monotonic!
@staticmethod
def abcissa(data, axis):
slc = [0 for _ in data.shape]
slc[axis] = slice(None, None)
att = data.get_world_component_id(axis)
return data[att, tuple(slc)].ravel()
@staticmethod
def spectrum(data, attribute, roi, slc, zaxis):
xaxis = slc.index('x')
yaxis = slc.index('y')
ndim, nz = data.ndim, data.shape[zaxis]
l, r, b, t = roi.xmin, roi.xmax, roi.ymin, roi.ymax
shp = data.shape
l, r = np.clip([l, r], 0, shp[xaxis])
b, t = np.clip([b, t], 0, shp[yaxis])
# extract sub-slice, without changing dimension
slc = [slice(s, s + 1)
if s not in ['x', 'y'] else slice(None)
for s in slc]
slc[xaxis] = slice(l, r)
slc[yaxis] = slice(b, t)
slc[zaxis] = slice(None)
x = Extractor.abcissa(data, zaxis)
data = data[attribute, tuple(slc)]
finite = np.isfinite(data)
assert data.ndim == ndim
for i in reversed(list(range(ndim))):
if i != zaxis:
data = np.nansum(data, axis=i)
finite = finite.sum(axis=i)
assert data.ndim == 1
assert data.size == nz
data = (1. * data / finite).ravel()
return x, data
@staticmethod
def world2pixel(data, axis, value):
x = Extractor.abcissa(data, axis)
if x.size > 1 and (x[1] < x[0]):
x = x[::-1]
result = x.size - np.searchsorted(x, value) - 1
else:
result = np.searchsorted(x, value)
return np.clip(result, 0, x.size - 1)
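    # Hedged illustration (not part of the original module): if the world
    # values along ``axis`` were [30., 20., 10.] (monotonically decreasing),
    # the copy is reversed to ascending order before searching and the index
    # is flipped back, so a value of 20. maps to pixel index 1, i.e. the
    # position of 20. in the original, descending ordering.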
@staticmethod
def pixel2world(data, axis, value):
x = Extractor.abcissa(data, axis)
return x[np.clip(value, 0, x.size - 1)]
@staticmethod
def subset_spectrum(subset, attribute, slc, zaxis):
"""
Extract a spectrum from a subset.
This makes a mask of the subset in the **current slice**,
and extracts a tube of this shape over all slices along ``zaxis``.
In other words, the variation of the subset along ``zaxis`` is ignored,
and only the interaction of the subset and the slice is relevant.
:param subset: A :class:`~glue.core.subset.Subset`
:param attribute: The :class:`~glue.core.data.ComponentID` to extract
:param slc: A tuple describing the slice
:param zaxis: Which axis to integrate over
"""
data = subset.data
x = Extractor.abcissa(data, zaxis)
view = [slice(s, s + 1)
if s not in ['x', 'y'] else slice(None)
for s in slc]
mask = np.squeeze(subset.to_mask(view))
if slc.index('x') < slc.index('y'):
mask = mask.T
w = np.where(mask)
view[slc.index('x')] = w[1]
view[slc.index('y')] = w[0]
result = np.empty(x.size)
# treat each channel separately, to reduce memory storage
        for i in range(data.shape[zaxis]):
view[zaxis] = i
val = data[attribute, view]
result[i] = np.nansum(val) / np.isfinite(val).sum()
y = result
return x, y
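# A minimal usage sketch for Extractor (illustrative only; ``cube``, ``att``
# and ``roi`` below are hypothetical stand-ins for a glue Data object, a
# ComponentID and a rectangular ROI exposing xmin/xmax/ymin/ymax):
#
#     slc = ('x', 'y', 0)   # x/y span the image plane, the last axis is sliced
#     zaxis = 2             # profile along the axis that is not 'x' or 'y'
#     x, y = Extractor.spectrum(cube, att, roi, slc, zaxis)
#
# ``x`` holds the world coordinates along ``zaxis`` and ``y`` the mean of the
# finite values inside the ROI for each channel.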
class SpectrumContext(object):
client = Pointer('main.client')
data = Pointer('main.data')
profile_axis = Pointer('main.profile_axis')
canvas = Pointer('main.canvas')
profile = Pointer('main.profile')
def __init__(self, main):
self.main = main
self.grip = None
self.panel = None
self.widget = None
self._setup_grip()
self._setup_widget()
self._connect()
def _setup_grip(self):
raise NotImplementedError()
def _setup_widget(self):
raise NotImplementedError()
def _connect(self):
pass
def set_enabled(self, enabled):
self.enable() if enabled else self.disable()
def enable(self):
if self.grip is not None:
self.grip.enable()
def disable(self):
if self.grip is not None:
self.grip.disable()
def recenter(self, lim):
"""Re-center the grip to the given x axlis limit tuple"""
if self.grip is None:
return
if hasattr(self.grip, 'value'):
self.grip.value = sum(lim) / 2.
return
# Range grip
cen = sum(lim) / 2
wid = max(lim) - min(lim)
self.grip.range = cen - wid / 4, cen + wid / 4
class NavContext(SpectrumContext):
def _setup_grip(self):
def _set_client_from_grip(value):
"""Update client.slice given grip value"""
slc = list(self.client.slice)
# client.slice stored in pixel coords
value = Extractor.world2pixel(
self.data,
self.profile_axis, value)
slc[self.profile_axis] = value
self.client.slice = tuple(slc)
def _set_grip_from_client(slc):
"""Update grip.value given client.slice"""
# grip.value is stored in world coordinates
val = slc[self.profile_axis]
val = Extractor.pixel2world(self.data, self.profile_axis, val)
# If pix2world not monotonic, this can trigger infinite recursion.
# Avoid by disabling callback loop
# XXX better to specifically ignore _set_client_from_grip
with ignore_callback(self.client, 'slice'):
self.grip.value = val
self.grip = self.main.profile.new_value_grip()
add_callback(self.client, 'slice', _set_grip_from_client)
add_callback(self.grip, 'value', _set_client_from_grip)
def _connect(self):
pass
def _setup_widget(self):
self.widget = QTextEdit()
self.widget.setHtml("To <b> slide </b> through the cube, "
"drag the handle or double-click<br><br><br>"
"To make a <b> new profile </b>, "
"click-drag a new box in the image, or drag "
"a subset onto the plot to the left")
self.widget.setTextInteractionFlags(Qt.NoTextInteraction)
class CollapseContext(SpectrumContext):
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
w = QWidget()
l = QFormLayout()
w.setLayout(l)
combo = QComboBox()
combo.addItem("Mean", userData=Aggregate.mean)
combo.addItem("Median", userData=Aggregate.median)
combo.addItem("Max", userData=Aggregate.max)
combo.addItem("Centroid", userData=Aggregate.mom1)
combo.addItem("Linewidth", userData=Aggregate.mom2)
run = QPushButton("Collapse")
self._run = run
l.addRow("", combo)
l.addRow("", run)
self.widget = w
self._combo = combo
def _connect(self):
self._run.clicked.connect(nonpartial(self._aggregate))
def _aggregate(self):
func = self._combo.itemData(self._combo.currentIndex())
rng = list(self.grip.range)
rng[1] += 1
rng = Extractor.world2pixel(self.data,
self.profile_axis,
rng)
agg = Aggregate(self.data, self.client.display_attribute,
self.main.profile_axis, self.client.slice, rng)
im = func(agg)
self.client.override_image(im)
class ConstraintsWidget(QWidget):
def __init__(self, constraints, parent=None):
super(ConstraintsWidget, self).__init__(parent)
self.constraints = constraints
self.layout = QGridLayout()
self.layout.setContentsMargins(2, 2, 2, 2)
self.layout.setSpacing(4)
self.setLayout(self.layout)
self.layout.addWidget(QLabel("Estimate"), 0, 1)
self.layout.addWidget(QLabel("Fixed"), 0, 2)
self.layout.addWidget(QLabel("Bounded"), 0, 3)
self.layout.addWidget(QLabel("Lower Bound"), 0, 4)
self.layout.addWidget(QLabel("Upper Bound"), 0, 5)
self._widgets = {}
names = sorted(list(self.constraints.keys()))
for k in names:
row = []
w = QLabel(k)
row.append(w)
v = QDoubleValidator()
e = QLineEdit()
e.setValidator(v)
e.setText(str(constraints[k]['value'] or ''))
row.append(e)
w = QCheckBox()
w.setChecked(constraints[k]['fixed'])
fix = w
row.append(w)
w = QCheckBox()
limits = constraints[k]['limits']
w.setChecked(limits is not None)
bound = w
row.append(w)
e = QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[0]))
row.append(e)
e = QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[1]))
row.append(e)
def unset(w):
def result(active):
if active:
w.setChecked(False)
return result
fix.toggled.connect(unset(bound))
bound.toggled.connect(unset(fix))
self._widgets[k] = row
for i, row in enumerate(names, 1):
for j, widget in enumerate(self._widgets[row]):
self.layout.addWidget(widget, i, j)
def settings(self, name):
row = self._widgets[name]
name, value, fixed, limited, lo, hi = row
value = float(value.text()) if value.text() else None
fixed = fixed.isChecked()
limited = limited.isChecked()
lo = lo.text()
hi = hi.text()
limited = limited and not ((not lo) or (not hi))
limits = None if not limited else [float(lo), float(hi)]
return dict(value=value, fixed=fixed, limits=limits)
def update_constraints(self, fitter):
for name in self._widgets:
s = self.settings(name)
fitter.set_constraint(name, **s)
class FitSettingsWidget(QDialog):
def __init__(self, fitter, parent=None):
super(FitSettingsWidget, self).__init__(parent)
self.fitter = fitter
self._build_form()
self._connect()
self.setModal(True)
def _build_form(self):
fitter = self.fitter
l = QFormLayout()
options = fitter.options
self.widgets = {}
self.forms = {}
for k in sorted(options):
item = build_form_item(fitter, k)
l.addRow(item.label, item.widget)
self.widgets[k] = item.widget
self.forms[k] = item # need to prevent garbage collection
constraints = fitter.constraints
if constraints:
self.constraints = ConstraintsWidget(constraints)
l.addRow(self.constraints)
else:
self.constraints = None
self.okcancel = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
l.addRow(self.okcancel)
self.setLayout(l)
def _connect(self):
self.okcancel.accepted.connect(self.accept)
self.okcancel.rejected.connect(self.reject)
self.accepted.connect(self.update_fitter_from_settings)
def update_fitter_from_settings(self):
for k, v in self.widgets.items():
setattr(self.fitter, k, v.value())
if self.constraints is not None:
self.constraints.update_constraints(self.fitter)
class FitContext(SpectrumContext):
error = CurrentComboProperty('ui.uncertainty_combo')
fitter = CurrentComboProperty('ui.profile_combo')
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
self.ui = load_ui('spectrum_fit_panel')
self.ui.uncertainty_combo.hide()
self.ui.uncertainty_label.hide()
font = QFont("Courier")
font.setStyleHint(font.Monospace)
self.ui.results_box.document().setDefaultFont(font)
self.ui.results_box.setLineWrapMode(self.ui.results_box.NoWrap)
self.widget = self.ui
for fitter in list(fit_plugin):
self.ui.profile_combo.addItem(fitter.label,
userData=fitter())
def _edit_model_options(self):
d = FitSettingsWidget(self.fitter)
d.exec_()
def _connect(self):
self.ui.fit_button.clicked.connect(nonpartial(self.fit))
self.ui.settings_button.clicked.connect(
nonpartial(self._edit_model_options))
def fit(self):
"""
Fit a model to the data
The fitting happens on a dedicated thread, to keep the UI
responsive
"""
xlim = self.grip.range
fitter = self.fitter
def on_success(result):
fit_result, _, _, _ = result
self._report_fit(fitter.summarize(*result))
self.main.profile.plot_fit(fitter, fit_result)
def on_fail(exc_info):
exc = '\n'.join(traceback.format_exception(*exc_info))
self._report_fit("Error during fitting:\n%s" % exc)
def on_done():
self.ui.fit_button.setText("Fit")
self.ui.fit_button.setEnabled(True)
self.canvas.draw()
self.ui.fit_button.setText("Running...")
self.ui.fit_button.setEnabled(False)
w = Worker(self.main.profile.fit, fitter, xlim=xlim)
w.result.connect(on_success)
w.error.connect(on_fail)
w.finished.connect(on_done)
self._fit_worker = w # hold onto a reference
w.start()
def _report_fit(self, report):
self.ui.results_box.document().setPlainText(report)
class SpectrumMainWindow(QMainWindow):
subset_dropped = Signal(object)
def __init__(self, parent=None):
super(SpectrumMainWindow, self).__init__(parent=parent)
self.setAcceptDrops(True)
def dragEnterEvent(self, event):
if event.mimeData().hasFormat(LAYERS_MIME_TYPE):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
layer = event.mimeData().data(LAYERS_MIME_TYPE)[0]
if isinstance(layer, Subset):
self.subset_dropped.emit(layer)
class SpectrumTool(object):
def __init__(self, image_widget):
self._relim_requested = True
self.image_widget = image_widget
self._build_main_widget()
self.client = self.image_widget.client
self.profile = ProfileViewer(self.canvas.fig)
self.axes = self.profile.axes
self.mouse_mode = self._setup_mouse_mode()
self._setup_toolbar()
self._setup_ctxbar()
self._connect()
self.image_widget.session.application.add_widget(self,
label='Profile')
def mdi_wrap(self):
sub = QMdiSubWindow()
sub.setWidget(self.widget)
self.widget.destroyed.connect(sub.close)
sub.resize(self.widget.size())
self._mdi_wrapper = sub
return sub
def _build_main_widget(self):
self.widget = SpectrumMainWindow()
w = QWidget()
l = QHBoxLayout()
l.setSpacing(2)
l.setContentsMargins(2, 2, 2, 2)
w.setLayout(l)
mpl = MplWidget()
self.canvas = mpl.canvas
l.addWidget(mpl)
l.setStretchFactor(mpl, 5)
self.widget.setCentralWidget(w)
def _setup_ctxbar(self):
l = self.widget.centralWidget().layout()
self._contexts = [NavContext(self),
FitContext(self),
CollapseContext(self)]
tabs = QTabWidget()
tabs.addTab(self._contexts[0].widget, 'Navigate')
tabs.addTab(self._contexts[1].widget, 'Fit')
tabs.addTab(self._contexts[2].widget, 'Collapse')
self._tabs = tabs
self._tabs.setVisible(False)
l.addWidget(tabs)
l.setStretchFactor(tabs, 0)
def _connect(self):
add_callback(self.client, 'slice',
self._check_invalidate,
echo_old=True)
def _on_tab_change(index):
for i, ctx in enumerate(self._contexts):
ctx.set_enabled(i == index)
if i == index:
self.profile.active_grip = ctx.grip
self._tabs.currentChanged.connect(_on_tab_change)
_on_tab_change(self._tabs.currentIndex())
self.widget.subset_dropped.connect(self._extract_subset_profile)
def _setup_mouse_mode(self):
# This will be added to the ImageWidget's toolbar
mode = SpectrumExtractorMode(self.image_widget.client.axes,
release_callback=self._update_profile)
return mode
def _setup_toolbar(self):
tb = GlueToolbar(self.canvas, self.widget)
# disable ProfileViewer mouse processing during mouse modes
tb.mode_activated.connect(self.profile.disconnect)
tb.mode_deactivated.connect(self.profile.connect)
self._menu_toggle_action = QAction("Options", tb)
self._menu_toggle_action.setCheckable(True)
self._menu_toggle_action.toggled.connect(self._toggle_menu)
tb.addAction(self._menu_toggle_action)
self.widget.addToolBar(tb)
return tb
def _toggle_menu(self, active):
self._tabs.setVisible(active)
def _check_invalidate(self, slc_old, slc_new):
"""
If we change the orientation of the slice,
reset and hide the profile viewer
"""
if self.profile_axis is None:
return
if (slc_old.index('x') != slc_new.index('x') or
slc_old.index('y') != slc_new.index('y')):
self.reset()
def reset(self):
self.hide()
self.mouse_mode.clear()
self._relim_requested = True
@property
def data(self):
return self.client.display_data
@property
def profile_axis(self):
# XXX make this settable
# defaults to the non-xy axis with the most channels
slc = self.client.slice
candidates = [i for i, s in enumerate(slc) if s not in ['x', 'y']]
return max(candidates, key=lambda i: self.data.shape[i])
def _recenter_grips(self):
for ctx in self._contexts:
ctx.recenter(self.axes.get_xlim())
def _extract_subset_profile(self, subset):
slc = self.client.slice
try:
x, y = Extractor.subset_spectrum(subset,
self.client.display_attribute,
slc,
self.profile_axis)
except IncompatibleAttribute:
return
self._set_profile(x, y)
def _update_profile(self, *args):
data = self.data
att = self.client.display_attribute
slc = self.client.slice
roi = self.mouse_mode.roi()
if data is None or att is None:
return
zax = self.profile_axis
x, y = Extractor.spectrum(data, att, roi, slc, zax)
self._set_profile(x, y)
def _set_profile(self, x, y):
data = self.data
xid = data.get_world_component_id(self.profile_axis)
units = data.get_component(xid).units
xlabel = str(xid) if units is None else '%s [%s]' % (xid, units)
xlim = self.axes.get_xlim()
self.profile.set_xlabel(xlabel)
self.profile.set_profile(x, y, c='k')
# relim x range if requested
if self._relim_requested:
self._relim_requested = False
self.axes.set_xlim(np.nanmin(x), np.nanmax(x))
# relim y range to data within the view window
self.profile.autoscale_ylim()
if self.axes.get_xlim() != xlim:
self._recenter_grips()
self.axes.figure.canvas.draw()
self.show()
def _move_below_image_widget(self):
rect = self.image_widget.frameGeometry()
pos = rect.bottomLeft()
self._mdi_wrapper.setGeometry(pos.x(), pos.y(),
rect.width(), 300)
def show(self):
if self.widget.isVisible():
return
self._move_below_image_widget()
self.widget.show()
def hide(self):
self.widget.close()
| {
"content_hash": "52d35046912140b0e03a19d8cd6cd8d1",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 79,
"avg_line_length": 31.117142857142856,
"alnum_prop": 0.5659719034064824,
"repo_name": "bsipocz/glue",
"id": "c339f198d6a523aa8a882bcbc75148a99e6c9bdd",
"size": "21782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/qt/spectrum_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_membership
short_description: Add or remove PostgreSQL roles from groups
description:
- Adds or removes PostgreSQL roles from groups (other roles).
- Users are roles with login privilege.
- Groups are PostgreSQL roles usually without LOGIN privilege.
- "Common use case:"
- 1) add a new group (groups) by M(postgresql_user) module with I(role_attr_flags=NOLOGIN)
- 2) grant them desired privileges by M(postgresql_privs) module
- 3) add desired PostgreSQL users to the new group (groups) by this module
version_added: '2.8'
options:
groups:
description:
- The list of groups (roles) that need to be granted to or revoked from I(target_roles).
required: yes
type: list
aliases:
- group
- source_role
- source_roles
target_roles:
description:
- The list of target roles (groups will be granted to them).
required: yes
type: list
aliases:
- target_role
- users
- user
fail_on_role:
description:
- If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
default: yes
type: bool
state:
description:
- Membership state.
    - I(state=present) implies the I(groups) must be granted to I(target_roles).
- I(state=absent) implies the I(groups) must be revoked from I(target_roles).
type: str
default: present
choices: [ absent, present ]
db:
description:
- Name of database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
seealso:
- module: postgresql_user
- module: postgresql_privs
- module: postgresql_owner
- name: PostgreSQL role membership reference
description: Complete reference of the PostgreSQL role membership documentation.
link: https://www.postgresql.org/docs/current/role-membership.html
- name: PostgreSQL role attributes reference
description: Complete reference of the PostgreSQL role attributes documentation.
link: https://www.postgresql.org/docs/current/role-attributes.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Grant role read_only to alice and bob
postgresql_membership:
group: read_only
target_roles:
- alice
- bob
state: present
# you can also use target_roles: alice,bob,etc to pass the role list
- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
postgresql_membership:
groups:
- read_only
- exec_func
target_role: bob
fail_on_role: no
state: absent
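# Illustrative sketch of the common use case described in DOCUMENTATION above.
# The role, user and database names (app_ro, alice, mydb) are hypothetical.
- name: Create a NOLOGIN group role
  postgresql_user:
    name: app_ro
    role_attr_flags: NOLOGIN
- name: Grant SELECT on all tables in schema public to app_ro
  postgresql_privs:
    db: mydb
    privs: SELECT
    type: table
    objs: ALL_IN_SCHEMA
    schema: public
    roles: app_ro
- name: Add alice to the app_ro group
  postgresql_membership:
    group: app_ro
    target_roles: alice
    state: present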
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: str
sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
granted:
description: Dict of granted groups and roles.
returned: if I(state=present)
type: dict
sample: { "ro_group": [ "alice", "bob" ] }
revoked:
description: Dict of revoked groups and roles.
returned: if I(state=absent)
type: dict
sample: { "ro_group": [ "alice", "bob" ] }
state:
description: Membership state that tried to be set.
returned: always
type: str
sample: "present"
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
PgMembership,
postgres_common_argument_spec,
)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
groups=dict(type='list', aliases=['group', 'source_role', 'source_roles']),
target_roles=dict(type='list', aliases=['target_role', 'user', 'users']),
fail_on_role=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
groups = module.params['groups']
target_roles = module.params['target_roles']
fail_on_role = module.params['fail_on_role']
state = module.params['state']
conn_params = get_conn_params(module, module.params, warn_db_default=False)
db_connection = connect_to_db(module, conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
# Create the object and do main job:
pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
if state == 'present':
pg_membership.grant()
elif state == 'absent':
pg_membership.revoke()
# Rollback if it's possible and check_mode:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
return_dict = dict(
changed=pg_membership.changed,
state=state,
groups=pg_membership.groups,
target_roles=pg_membership.target_roles,
queries=pg_membership.executed_queries,
)
if state == 'present':
return_dict['granted'] = pg_membership.granted
elif state == 'absent':
return_dict['revoked'] = pg_membership.revoked
module.exit_json(**return_dict)
if __name__ == '__main__':
main()
| {
"content_hash": "178a64daf4ae93d30c99c7595b3916fd",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 98,
"avg_line_length": 28.601895734597157,
"alnum_prop": 0.6647887323943662,
"repo_name": "thaim/ansible",
"id": "31a781e0210ed88e412605f499633b14510bbeff",
"size": "6248",
"binary": false,
"copies": "3",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/database/postgresql/postgresql_membership.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset, CompoundType
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
# test compound data types.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
DIM_NAME = 'phony_dim'
GROUP_NAME = 'phony_group'
VAR_NAME = 'phony_compound_var'
TYPE_NAME1 = 'cmp1'
TYPE_NAME2 = 'cmp2'
TYPE_NAME3 = 'cmp3'
TYPE_NAME4 = 'cmp4'
TYPE_NAME5 = 'cmp5'
DIM_SIZE=3
# unaligned data types (note they are nested)
dtype1=np.dtype([('i', 'i2'), ('j', 'i8')])
dtype2=np.dtype([('x', 'f4',), ('y', 'f8',(3,2))])
dtype3=np.dtype([('xx', dtype1), ('yy', dtype2)])
dtype4=np.dtype([('xxx',dtype3),('yyy','f8', (4,))])
dtype5=np.dtype([('x1', dtype1), ('y1', dtype2)])
# aligned data types
dtype1a = np.dtype({'names':['i','j'],'formats':['<i2','<i8']},align=True)
dtype2a = np.dtype({'names':['x','y'],'formats':['<f4',('<f8', (3, 2))]},align=True)
dtype3a = np.dtype({'names':['xx','yy'],'formats':[dtype1a,dtype2a]},align=True)
dtype4a = np.dtype({'names':['xxx','yyy'],'formats':[dtype3a,('f8', (4,))]},align=True)
dtype5a = np.dtype({'names':['x1','y1'],'formats':[dtype1a,dtype2a]},align=True)
data = np.zeros(DIM_SIZE,dtype4)
data['xxx']['xx']['i']=1
data['xxx']['xx']['j']=2
data['xxx']['yy']['x']=3
data['xxx']['yy']['y']=4
data['yyy'] = 5
datag = np.zeros(DIM_SIZE,dtype5)
datag['x1']['i']=10
datag['x1']['j']=20
datag['y1']['x']=30
datag['y1']['y']=40
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file, 'w')
d = f.createDimension(DIM_NAME,DIM_SIZE)
g = f.createGroup(GROUP_NAME)
# simple compound types.
cmptype1 = f.createCompoundType(dtype1, TYPE_NAME1)
cmptype2 = f.createCompoundType(dtype2, TYPE_NAME2)
# close and reopen the file to make sure compound
# type info read back in correctly.
f.close()
f = Dataset(self.file,'r+')
g = f.groups[GROUP_NAME]
# multiply nested compound types
cmptype3 = f.createCompoundType(dtype3, TYPE_NAME3)
cmptype4 = f.createCompoundType(dtype4, TYPE_NAME4)
cmptype5 = f.createCompoundType(dtype5, TYPE_NAME5)
v = f.createVariable(VAR_NAME,cmptype4, DIM_NAME)
vv = g.createVariable(VAR_NAME,cmptype5, DIM_NAME)
v[:] = data
vv[:] = datag
# try reading the data back before the file is closed
dataout = v[:]
dataoutg = vv[:]
assert (cmptype4 == dtype4a) # data type should be aligned
assert (dataout.dtype == dtype4a) # data type should be aligned
assert(list(f.cmptypes.keys()) ==\
[TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
assert_array_almost_equal(dataout['xxx']['yy']['y'],data['xxx']['yy']['y'])
assert_array_almost_equal(dataout['yyy'],data['yyy'])
assert_array_equal(dataoutg['x1']['i'],datag['x1']['i'])
assert_array_equal(dataoutg['x1']['j'],datag['x1']['j'])
assert_array_almost_equal(dataoutg['y1']['x'],datag['y1']['x'])
assert_array_almost_equal(dataoutg['y1']['y'],datag['y1']['y'])
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
#pass
def runTest(self):
"""testing compound variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR_NAME]
g = f.groups[GROUP_NAME]
vv = g.variables[VAR_NAME]
dataout = v[:]
dataoutg = vv[:]
# make sure data type is aligned
assert (f.cmptypes['cmp4'] == dtype4a)
assert(list(f.cmptypes.keys()) ==\
[TYPE_NAME1,TYPE_NAME2,TYPE_NAME3,TYPE_NAME4,TYPE_NAME5])
assert_array_equal(dataout['xxx']['xx']['i'],data['xxx']['xx']['i'])
assert_array_equal(dataout['xxx']['xx']['j'],data['xxx']['xx']['j'])
assert_array_almost_equal(dataout['xxx']['yy']['x'],data['xxx']['yy']['x'])
assert_array_almost_equal(dataout['xxx']['yy']['y'],data['xxx']['yy']['y'])
assert_array_almost_equal(dataout['yyy'],data['yyy'])
assert_array_equal(dataoutg['x1']['i'],datag['x1']['i'])
assert_array_equal(dataoutg['x1']['j'],datag['x1']['j'])
assert_array_almost_equal(dataoutg['y1']['x'],datag['y1']['x'])
assert_array_almost_equal(dataoutg['y1']['y'],datag['y1']['y'])
f.close()
# issue 773
f = Dataset(self.file,'w')
dtype = np.dtype([('observation', 'i4'),
('station_name','S80')])
dtype_nest = np.dtype([('observation', 'i4'),
('station_name','S80'),
('nested_observation',dtype)])
station_data_t1 = f.createCompoundType(dtype,'station_data1')
station_data_t2 = f.createCompoundType(dtype_nest,'station_data')
f.createDimension('station',None)
statdat = f.createVariable('station_obs', station_data_t2, ('station',))
assert(statdat.dtype == station_data_t2.dtype)
datain = np.empty(2,station_data_t2.dtype_view)
datain['observation'][:] = (123,314)
datain['station_name'][:] = ('Boulder','New York')
datain['nested_observation']['observation'][:] = (-999,999)
datain['nested_observation']['station_name'][:] = ('Boston','Chicago')
statdat[:] = datain
f.close()
f = Dataset(self.file)
dataout = f['station_obs'][:]
assert(dataout.dtype == station_data_t2.dtype_view)
assert_array_equal(datain, dataout)
f.close()
if __name__ == '__main__':
from netCDF4 import getlibversion
version = getlibversion().split()[0]
unittest.main()
| {
"content_hash": "37aa1410a7cff5e0ce9ffa32ff632f3a",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 87,
"avg_line_length": 42.33098591549296,
"alnum_prop": 0.580934952586924,
"repo_name": "Unidata/netcdf4-python",
"id": "72b6892063a1c4e02a9b10ed225e8f29bf2ec30e",
"size": "6011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/tst_compoundvar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "425"
},
{
"name": "Cython",
"bytes": "334106"
},
{
"name": "Python",
"bytes": "296829"
},
{
"name": "Shell",
"bytes": "469"
}
],
"symlink_target": ""
} |
"""Views for Zinnia tags"""
from django.http import Http404
from django.views.generic.list import ListView
from django.views.generic.list import BaseListView
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from tagging.utils import get_tag
from tagging.models import Tag
from tagging.models import TaggedItem
from zinnia.models import Entry
from zinnia.settings import PAGINATION
from zinnia.views.mixins.templates import EntryQuerysetTemplateResponseMixin
class TagList(ListView):
"""View return a list of all published tags"""
template_name = 'zinnia/tag_list.html'
context_object_name = 'tag_list'
def get_queryset(self):
"""Override the get_queryset method to
compute and return the published tags"""
return Tag.objects.usage_for_queryset(
Entry.published.all(), counts=True)
class TagDetail(EntryQuerysetTemplateResponseMixin, BaseListView):
"""View return a list of all the entries
published under the current tag"""
model_type = 'tag'
paginate_by = PAGINATION
def get_model_name(self):
"""The model name is the tag slugified"""
return slugify(self.tag)
def get_queryset(self):
"""Return a queryset of entries published
belonging to the current tag"""
self.tag = get_tag(self.kwargs['tag'])
if self.tag is None:
raise Http404(_('No Tag found matching "%s".') %
self.kwargs['tag'])
return TaggedItem.objects.get_by_model(
Entry.published.all(), self.tag)
def get_context_data(self, **kwargs):
"""Add the current tag in context"""
context = super(TagDetail, self).get_context_data(**kwargs)
context['tag'] = self.tag
return context
| {
"content_hash": "2acf54e2747751a96392f3a79596a126",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 34.301886792452834,
"alnum_prop": 0.682068206820682,
"repo_name": "westinedu/similarinterest",
"id": "af302fa2517f29adf1c98886cfa1c41cb4d84635",
"size": "1818",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zinnia/views/tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "446844"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "6029908"
},
{
"name": "Ruby",
"bytes": "249"
},
{
"name": "Shell",
"bytes": "1355"
}
],
"symlink_target": ""
} |
"""
Django settings for sc_project project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from scsite.static import sensitive
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = sensitive.SECRET_K
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'shecodes.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'scsite',
'bootstrap3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sc_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'sc_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Israel'
USE_I18N = True
USE_L10N = True
USE_TZ = False
ugettext = lambda s: s
LANGUAGES = (
('en', ugettext('English')),
('he', ugettext('Hebrew')),
)
prefix_default_language = False
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
SITE_ID = 1
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale/'),
os.path.join(BASE_DIR, 'scsite/locale/'),
os.path.join(BASE_DIR, 'scsite/templates/locale/'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..')
SITE_ROOT = PROJECT_ROOT
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media')
MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = 'home'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = sensitive.EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = sensitive.EMAIL_HOST_PASSWORD
# when online, please change username and password to that of [email protected]
| {
"content_hash": "a5b0f0c9b5eacce2ac7516617075f73a",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 91,
"avg_line_length": 27.20625,
"alnum_prop": 0.6898690558235699,
"repo_name": "shovalsa/shecodes_website",
"id": "5d9e47c8e0ab5268168e9b838b039a80cf91156e",
"size": "4353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sc_project/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "54504"
},
{
"name": "JavaScript",
"bytes": "12038"
},
{
"name": "Python",
"bytes": "32621"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.alarm_control_panel.verisure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Interfaces with Verisure alarm control panel.
"""
import logging
import homeassistant.components.verisure as verisure
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
STATE_UNKNOWN,
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Verisure platform. """
if not verisure.MY_PAGES:
_LOGGER.error('A connection has not been made to Verisure mypages.')
return False
alarms = []
alarms.extend([
VerisureAlarm(value)
for value in verisure.get_alarm_status().values()
if verisure.SHOW_ALARM
])
add_devices(alarms)
# pylint: disable=abstract-method
class VerisureAlarm(alarm.AlarmControlPanel):
""" Represents a Verisure alarm status. """
def __init__(self, alarm_status):
self._id = alarm_status.id
self._device = verisure.MY_PAGES.DEVICE_ALARM
self._state = STATE_UNKNOWN
@property
def name(self):
""" Returns the name of the device. """
return 'Alarm {}'.format(self._id)
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def code_format(self):
""" Four digit code required. """
return '^\\d{4}$'
def update(self):
""" Update alarm status """
verisure.update()
if verisure.STATUS[self._device][self._id].status == 'unarmed':
self._state = STATE_ALARM_DISARMED
elif verisure.STATUS[self._device][self._id].status == 'armedhome':
self._state = STATE_ALARM_ARMED_HOME
elif verisure.STATUS[self._device][self._id].status == 'armedaway':
self._state = STATE_ALARM_ARMED_AWAY
elif verisure.STATUS[self._device][self._id].status != 'pending':
_LOGGER.error(
'Unknown alarm state %s',
verisure.STATUS[self._device][self._id].status)
def alarm_disarm(self, code=None):
""" Send disarm command. """
verisure.MY_PAGES.set_alarm_status(
code,
verisure.MY_PAGES.ALARM_DISARMED)
_LOGGER.warning('disarming')
def alarm_arm_home(self, code=None):
""" Send arm home command. """
verisure.MY_PAGES.set_alarm_status(
code,
verisure.MY_PAGES.ALARM_ARMED_HOME)
_LOGGER.warning('arming home')
def alarm_arm_away(self, code=None):
""" Send arm away command. """
verisure.MY_PAGES.set_alarm_status(
code,
verisure.MY_PAGES.ALARM_ARMED_AWAY)
_LOGGER.warning('arming away')
| {
"content_hash": "96c51593c14983b8667c50ca56620c37",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 76,
"avg_line_length": 30.627659574468087,
"alnum_prop": 0.6036818339701285,
"repo_name": "pottzer/home-assistant",
"id": "9e0475592bd6250053d251368e258eacb136805e",
"size": "2879",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/alarm_control_panel/verisure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1044045"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Album.public'
db.add_column(u'photoapp_album', 'public',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Album.public'
db.delete_column(u'photoapp_album', 'public')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'photoapp.album': {
'Meta': {'object_name': 'Album'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'photog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'photoapp.photo': {
'Meta': {'object_name': 'Photo'},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photoapp.Album']"}),
'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'photog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'photoapp.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photos': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['photoapp.Photo']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '64'})
}
}
complete_apps = ['photoapp'] | {
"content_hash": "4a861d796eced9fadb86379ba4083280",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 195,
"avg_line_length": 67.5632183908046,
"alnum_prop": 0.5527390268798911,
"repo_name": "markcharyk/django-photos",
"id": "2461157879b31a095966ce9c2efaab1cc175d345",
"size": "5902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photoapp/migrations/0003_auto__add_field_album_public.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "208"
},
{
"name": "Python",
"bytes": "44102"
}
],
"symlink_target": ""
} |
from traits.api import Bool, Enum, Tuple
from drag_tool import DragTool
class MoveTool(DragTool):
""" Generic tool for moving a component's position relative to its container
"""
drag_button = Enum("left", "right")
# Should the moved component be raised to the top of its container's
# list of components? This is only recommended for overlaying containers
# and canvases, but generally those are the only ones in which the
# MoveTool will be useful.
auto_raise = Bool(True)
# The last cursor position we saw; used during drag to compute deltas
_prev_pos = Tuple(0, 0)
def is_draggable(self, x, y):
if self.component:
c = self.component
return (c.x <= x <= c.x2) and (c.y <= y <= c.y2)
else:
return False
def drag_start(self, event):
if self.component:
self._prev_pos = (event.x, event.y)
self.component._layout_needed = True
if self.auto_raise:
# Push the component to the top of its container's list
self.component.container.raise_component(self.component)
event.window.set_mouse_owner(self, event.net_transform())
event.handled = True
return
def dragging(self, event):
if self.component:
dx = event.x - self._prev_pos[0]
dy = event.y - self._prev_pos[1]
pos = self.component.position
self.component.position = [pos[0] + dx, pos[1] + dy]
self.component._layout_needed = True
self.component.request_redraw()
self._prev_pos = (event.x, event.y)
event.handled = True
return
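# Illustrative usage sketch (assumes the usual Enable Component/Container API;
# names such as `box` and `container` are placeholders):
#
#     from enable.api import Component, Container
#     box = Component(position=[10, 10], bounds=[50, 50], bgcolor="lightblue")
#     box.tools.append(MoveTool(component=box, drag_button="left"))
#     container = Container(bounds=[400, 400])
#     container.add(box)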
| {
"content_hash": "fce24bafcae5d68c559724376a797e0e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 33.509803921568626,
"alnum_prop": 0.593329432416618,
"repo_name": "tommy-u/enable",
"id": "db053c16d3b38cef60f1908cfc969e22688c511b",
"size": "1710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enable/tools/move_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "240"
},
{
"name": "C",
"bytes": "5526949"
},
{
"name": "C++",
"bytes": "3058044"
},
{
"name": "DIGITAL Command Language",
"bytes": "35819"
},
{
"name": "Groff",
"bytes": "236"
},
{
"name": "Makefile",
"bytes": "58238"
},
{
"name": "Objective-C",
"bytes": "16551"
},
{
"name": "Python",
"bytes": "2202660"
},
{
"name": "Shell",
"bytes": "6286"
}
],
"symlink_target": ""
} |
"""
Tests here shouldn't be run with sqlite backend.
"""
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from projector.tests.base import test_concurrently
class ThreadedProjectTest(TestCase):
def test_list(self):
url = reverse('projector_project_list')
@test_concurrently(10)
def toggle_test():
client = Client()
client.get(url)
toggle_test()
| {
"content_hash": "973bd17f380666323dc86b007eb7c6d2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 50,
"avg_line_length": 21.772727272727273,
"alnum_prop": 0.6764091858037579,
"repo_name": "lukaszb/django-projector",
"id": "d96f4989f302a6582e65374c5d6b9119354bb2ea",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projector/tests/test_concurrency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "43056"
},
{
"name": "Python",
"bytes": "309918"
}
],
"symlink_target": ""
} |
"""Setup script for hiiguid."""
from setuptools import setup, find_packages
from hiiguid import VERSION
version = '.'.join(map(str, VERSION))
# some trove classifiers:
# License :: OSI Approved :: MIT License
# Intended Audience :: Developers
# Operating System :: POSIX
setup(
name='hiiguid',
version=version,
description="HiiGUID generator",
long_description=open('README.md').read(),
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
],
keywords='guid uuid',
author='HiiDef',
author_email='[email protected]',
url="'http://github.com/hiidef/hiiguid'",
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
test_suite="tests",
# -*- Extra requirements: -*-
install_requires=[
],
entry_points="""
# -*- Entry points: -*-
""",
)
| {
"content_hash": "a6e4c0fbca2be19a3c710c53f14680ce",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 26.7,
"alnum_prop": 0.6338951310861424,
"repo_name": "hiidef/hiiguid",
"id": "9b9d736bd9dd4f7da491e60fc809e1fbf2261d42",
"size": "1115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17106"
}
],
"symlink_target": ""
} |
"""Pull out station metadata."""
from collections import ChainMap, namedtuple
from collections.abc import Mapping
from functools import cached_property
import numpy as np
import pandas as pd
from ..cbook import get_test_data
from ..package_tools import Exporter
from ..units import units
exporter = Exporter(globals())
Station = namedtuple('Station', ['id', 'synop_id', 'name', 'state', 'country',
'longitude', 'latitude', 'altitude', 'source'])
def to_dec_deg(dms):
"""Convert to decimal degrees."""
if not dms:
return 0.
deg, minutes = dms.split()
side = minutes[-1]
minutes = minutes[:2]
float_deg = int(deg) + int(minutes) / 60.
return float_deg if side in ('N', 'E') else -float_deg
def _read_station_table(input_file=None):
"""Read in the GEMPAK station table.
Yields tuple of station ID and `Station` for each entry.
"""
if input_file is None:
input_file = get_test_data('sfstns.tbl', as_file_obj=False)
with open(input_file) as station_file:
for line in station_file:
stid = line[:9].strip()
synop_id = int(line[9:16].strip())
name = line[16:49].strip()
state = line[49:52].strip()
country = line[52:55].strip()
lat = int(line[55:61].strip()) / 100.
lon = int(line[61:68].strip()) / 100.
alt = int(line[68:74].strip())
yield stid, Station(stid, synop_id=synop_id, name=name.title(), latitude=lat,
longitude=lon, altitude=alt, country=country, state=state,
source=input_file)
def _read_master_text_file(input_file=None):
"""Read in the master text file.
Yields tuple of station ID and `Station` for each entry.
"""
if input_file is None:
input_file = get_test_data('master.txt', as_file_obj=False)
with open(input_file) as station_file:
station_file.readline()
for line in station_file:
state = line[:3].strip()
name = line[3:20].strip().replace('_', ' ')
stid = line[20:25].strip()
synop_id = line[32:38].strip()
lat = to_dec_deg(line[39:46].strip())
lon = to_dec_deg(line[47:55].strip())
alt_part = line[55:60].strip()
alt = int(alt_part or 0.)
if stid:
if stid[0] in ('P', 'K'):
country = 'US'
else:
country = state
state = '--'
yield stid, Station(stid, synop_id=synop_id, name=name.title(), latitude=lat,
longitude=lon, altitude=alt, country=country, state=state,
source=input_file)
def _read_station_text_file(input_file=None):
"""Read the station text file.
Yields tuple of station ID and `Station` for each entry.
"""
if input_file is None:
input_file = get_test_data('stations.txt', as_file_obj=False)
with open(input_file) as station_file:
for line in station_file:
if line[0] == '!':
continue
lat = line[39:45].strip()
if not lat or lat == 'LAT':
continue
lat = to_dec_deg(lat)
state = line[:3].strip()
name = line[3:20].strip().replace('_', ' ')
stid = line[20:25].strip()
synop_id = line[32:38].strip()
lon = to_dec_deg(line[47:55].strip())
alt = int(line[55:60].strip())
country = line[81:83].strip()
yield stid, Station(stid, synop_id=synop_id, name=name.title(), latitude=lat,
longitude=lon, altitude=alt, country=country, state=state,
source=input_file)
def _read_airports_file(input_file=None):
"""Read the airports file."""
if input_file is None:
input_file = get_test_data('airport-codes.csv', as_file_obj=False)
df = pd.read_csv(input_file)
return pd.DataFrame({'id': df.ident.values, 'synop_id': 99999,
'latitude': df.latitude_deg.values,
'longitude': df.longitude_deg.values,
'altitude': units.Quantity(df.elevation_ft.values, 'ft').to('m').m,
'country': df.iso_region.str.split('-', n=1, expand=True)[1].values,
'source': input_file
}).to_dict()
@exporter.export
class StationLookup(Mapping):
"""Look up station information from multiple sources.
This class follows the `Mapping` protocol with station ID as the key. This makes it
possible to e.g. iterate over all locations and get all of a certain criteria:
>>> import metpy.io
>>> conus_stations = [s for s in metpy.io.station_info if s.startswith('K')]
>>> conus_stations[:3]
['KEET', 'K8A0', 'KALX']
"""
@cached_property
def tables(self):
"""Return an iterable mapping combining all the tables."""
return ChainMap(dict(_read_station_table()),
dict(_read_master_text_file()),
dict(_read_station_text_file()),
dict(_read_airports_file()))
def __len__(self):
"""Get the number of stations."""
return len(self.tables)
def __iter__(self):
"""Allow iteration over the stations."""
return iter(self.tables)
def __getitem__(self, stid):
"""Lookup station information from the ID."""
try:
return self.tables[stid]
except KeyError:
raise KeyError(f'No station information for {stid}') from None
with exporter:
station_info = StationLookup()
@exporter.export
def add_station_lat_lon(df, stn_var=None):
"""Lookup station information to add the station latitude and longitude to the DataFrame.
This function will add two columns to the DataFrame ('latitude' and 'longitude') after
looking up all unique station identifiers available in the DataFrame.
Parameters
----------
df : `pandas.DataFrame`
The DataFrame that contains the station observations
stn_var : str, optional
The string of the variable name that represents the station in the DataFrame. If not
provided, 'station', 'stid', and 'station_id' are tried in that order.
Returns
-------
`pandas.DataFrame` that contains original Dataframe now with the latitude and longitude
values for each location found in :data:`!station_info`.
"""
def key_finder(df):
names_to_try = ('station', 'stid', 'station_id')
for id_name in names_to_try:
if id_name in df:
return id_name
raise KeyError('Second argument not provided to add_station_lat_lon, but none of '
f'{names_to_try} were found.')
df['latitude'] = None
df['longitude'] = None
if stn_var is None:
stn_var = key_finder(df)
for stn in df[stn_var].unique():
try:
info = station_info[stn]
df.loc[df[stn_var] == stn, 'latitude'] = info.latitude
df.loc[df[stn_var] == stn, 'longitude'] = info.longitude
except KeyError:
df.loc[df[stn_var] == stn, 'latitude'] = np.nan
df.loc[df[stn_var] == stn, 'longitude'] = np.nan
return df
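# Illustrative examples (hypothetical values; station IDs must exist in the bundled tables):
#
#     >>> round(to_dec_deg('38 57N'), 2)
#     38.95
#     >>> round(to_dec_deg('77 27W'), 2)
#     -77.45
#
#     import pandas as pd
#     obs = pd.DataFrame({'station': ['KDEN', 'KOKC'], 'temperature': [10.2, 12.5]})
#     obs = add_station_lat_lon(obs)   # adds 'latitude' and 'longitude' columns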
| {
"content_hash": "d886c26088290e3bae2726926d2308ea",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 93,
"avg_line_length": 36.75369458128079,
"alnum_prop": 0.5617209489344592,
"repo_name": "Unidata/MetPy",
"id": "4db0f2ec5e11312b7a93deea08f2ad34c2bfa68e",
"size": "7599",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/metpy/io/station_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "551"
},
{
"name": "Makefile",
"bytes": "59"
},
{
"name": "Python",
"bytes": "1841514"
},
{
"name": "Ruby",
"bytes": "137"
}
],
"symlink_target": ""
} |
from azure.cli.testsdk import ScenarioTest
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
class SecurityCenterLocationsTests(ScenarioTest):
def test_security_locations(self):
locations = self.cmd('az security location list').get_output_in_json()
assert len(locations) == 1
self.cmd('az security location show -n ' + locations[0]["name"]).get_output_in_json()
| {
"content_hash": "a034d38ad597a03b4b287b995c6a847f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 93,
"avg_line_length": 31.53846153846154,
"alnum_prop": 0.724390243902439,
"repo_name": "yugangw-msft/azure-cli",
"id": "9c9cb468ca233ecd1b8c31c68803d3a0fb3508ed",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/security/tests/latest/test_locations_scenario.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.generics import get_object_or_404
from apps.articles.models import Article
from apps.articles.serializers import ArticleSerializer
class ArticleViewSet(viewsets.ModelViewSet):
queryset = Article.objects.all().order_by('-created')
serializer_class = ArticleSerializer
def articles(request):
article_objects = Article.objects.order_by('-created')
return render(request, 'articles/articles.html', {'articles': article_objects})
def details(request, article_id, article_slug):
# Get the article based on its id
article = get_object_or_404(Article, pk=article_id)
return render(request, 'articles/details.html', {'article': article})
| {
"content_hash": "7002f9c8a7cdea711fcf9b1522f5c619",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 83,
"avg_line_length": 31.583333333333332,
"alnum_prop": 0.7598944591029023,
"repo_name": "sardred/andlarweb",
"id": "8fb9416b1c6b59b529e2b9ccbdab4df83023f822",
"size": "758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/articles/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1336"
},
{
"name": "HTML",
"bytes": "1432"
},
{
"name": "JavaScript",
"bytes": "12629"
},
{
"name": "Python",
"bytes": "14442"
}
],
"symlink_target": ""
} |
from artchem.ReactionVessel import *
class Disperser:
def __init__( self ):
""" create networked topology inside single reaction vessel:
multiple vessels are mimicked via different groups of molecular
species
"""
topo = [ [1, 2], [1, 3], [1, 4], [2, 3] ]
(catalysts, reactionstrs) = self.mkreactions(topo)
# create reactor, choosing N_A*V such that c=k=1 overall
self.reactor = GillespieVessel(nav=1)
self.reactor.parse(reactionstrs)
self.reactor.inject('X4', 1000)
for c in catalysts:
self.reactor.inject(c, 1)
def run( self, finalvt ):
""" run the disperser protocol until simulation time finalvt """
plotall = False
self.reactor.trace_title('X')
self.reactor.trace_mult('X')
ptime = 0.0
injected = False
expelled = False
stchange = False
while (self.reactor.vtime() <= finalvt):
t = self.reactor.vtime()
self.reactor.iterate()
dt = t - ptime
if (t >= 10 and not injected):
self.reactor.inject('X2', 600)
injected = True
stchange = True
if (t >= 20 and not expelled):
self.reactor.expel('X2', 300)
expelled = True
stchange = True
if (dt >= 0.1 or plotall or stchange):
self.reactor.trace_mult('X')
ptime = t
stchange = False
def mkreactions( self, topo ):
""" constructs the list of reactions (with their catalysts)
automatically for a given topology 'topo';
'topo' is a list of links between 2 nodes (reactors);
each link is described by a pair [id1, id2] of node ids
identifying the 2 nodes to be interconnected.
CAUTION: only works with node ids made of single digits!!
"""
clist = []
rlist = []
for lnk in topo:
n0 = lnk[0]
n1 = lnk[1]
c0 = "C%d%d" % (n0, n1)
c1 = "C%d%d" % (n1, n0)
r0 = "%s + X%d --> %s + X%d" % (c0, n0, c0, n1)
r1 = "%s + X%d --> %s + X%d" % (c1, n1, c1, n0)
clist.append(c0)
clist.append(c1)
rlist.append(r0)
rlist.append(r1)
return (clist, rlist)
if __name__ == '__main__':
disperser = Disperser()
disperser.run(30.0)
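# Illustrative note: for a single link [1, 2], mkreactions() yields the catalysts
# ['C12', 'C21'] and the reaction strings
#     'C12 + X1 --> C12 + X2'
#     'C21 + X2 --> C21 + X1'
# so every link in 'topo' becomes a pair of catalyzed transport reactions that
# shuttle X molecules between the two mimicked vessels.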
| {
"content_hash": "f557e81cef1087c6c3225b7d4a371019",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 75,
"avg_line_length": 34.15068493150685,
"alnum_prop": 0.5086241476133173,
"repo_name": "danielrcardenas/ac-course-2017",
"id": "f6a59788c16d6c2426ff63aae9c03266f7f814bf",
"size": "4127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frameworks/pycellchem-2.0/src/Disperser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "104"
},
{
"name": "CSS",
"bytes": "3688"
},
{
"name": "HTML",
"bytes": "245100"
},
{
"name": "Java",
"bytes": "52185"
},
{
"name": "JavaScript",
"bytes": "24784"
},
{
"name": "Matlab",
"bytes": "19700"
},
{
"name": "NetLogo",
"bytes": "982227"
},
{
"name": "Objective-C",
"bytes": "550"
},
{
"name": "Python",
"bytes": "218542"
},
{
"name": "Shell",
"bytes": "7815"
},
{
"name": "Swift",
"bytes": "80999"
}
],
"symlink_target": ""
} |
''' Creates a merchant_id on the testbed.
This is intended to script some work that today is manual on the testbed.
'''
import requests
import json
import random
import sys
import httplib
def create_merchant(testbed_token, integrator_name):
'''creates a merchant and returns the merchant_id'''
url_base = 'https://mcashtestbed.appspot.com'
print 'Create a merchant on testbed'
headers = {
'Content-Type': 'application/json',
'X-Testbed-Token': testbed_token,
        'X-Mcash-Integrator': integrator_name
}
r = requests.post(
url_base + '/testbed/merchant/',
headers=headers
)
print "r.status_code =", r.status_code, " ", httplib.responses[r.status_code]
assert r.status_code == 200, "Expected r.status_code to be 200"
merchant_id = r.headers['x-mcash-merchant']
print "created merchant with merchant_id = ", merchant_id
return merchant_id
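# Illustrative usage sketch (the token below is a placeholder):
#
#     if __name__ == '__main__':
#         merchant_id = create_merchant('<your-testbed-token>', 'test_integrator')
#         print merchant_id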
| {
"content_hash": "a530cd6092bd19ed17ddc4d299f7cdf5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 31,
"alnum_prop": 0.667741935483871,
"repo_name": "mcash/merchant-api-python-sdk",
"id": "1e545bbe88378542b323fb03a832f5a1d5aaa119",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_test/create_merchant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70529"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trackbuild', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='build',
unique_together=set([('release', 'buildid')]),
),
]
| {
"content_hash": "b3ffb6756ea923e4e195097656cceaea",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 58,
"avg_line_length": 20.764705882352942,
"alnum_prop": 0.5949008498583569,
"repo_name": "miraculixx/trackbuild",
"id": "510cf8526ca0ffd014e3a09b9c5a760db1106106",
"size": "377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trackbuild/migrations/0002_auto_20150718_1936.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49502"
}
],
"symlink_target": ""
} |
import json
from . Utils.MongoRouter import MongoRouter
from django.http import JsonResponse
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
INDEX_HTML = 'index.html'
router = MongoRouter()
def get_user_board(email):
    last_visited = router.route("users").find_one({"email": email}, {"lastVisitedId": 1})["lastVisitedId"]
results = router.route("users").find_one(
{"email": email, "boards": {"$elemMatch": {"name": last_visited}}},
{"boards": 1}
)["boards"]
for result in results:
if result["name"] == last_visited:
return result
@require_http_methods(["GET", "OPTIONS"])
def login(request):
email = request.session.get("email", None)
if email:
try:
user_board = get_user_board(email)
if user_board:
print "Returning: %s" % user_board
return JsonResponse({"boards": user_board}, status=200)
return JsonResponse({"message": "Last visited board not found"}, status=404)
except Exception as e:
print "No boards found or other error: %s" % unicode(e)
return JsonResponse({"message": unicode(e)}, status=404)
return JsonResponse({"message": "[login] No email"}, status=401)
@require_http_methods(["POST"])
@csrf_exempt
def create_board(request):
email = request.session.get("email", None)
if email:
try:
board_body = json.loads(request.body)
router.route("users").update_one(
{"email": email},
{"$push": {
"boards": board_body
}}
)
return JsonResponse({"board": board_body}, status=200)
except Exception as e:
print "Create board failed with error: %s" % unicode(e)
return JsonResponse({"message": unicode(e)}, status=401)
return JsonResponse({"message": "[create_board] No email"}, status=401)
'''
Request
method: POST,
headers: {
x-auth-token: token
},
url: /board/create,
body: {
name:
}
Response
status 200
body: board
status 403
''' | {
"content_hash": "0312d76cff1e60a2317ff0f44e8f450d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 107,
"avg_line_length": 27.23170731707317,
"alnum_prop": 0.5902373488580385,
"repo_name": "1729org/Hadron",
"id": "91874dfe4b25f0f7c08f74a933ed29c9d8894043",
"size": "2233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Backend/backend/backend/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1343"
},
{
"name": "HTML",
"bytes": "8937"
},
{
"name": "JavaScript",
"bytes": "32576"
},
{
"name": "Python",
"bytes": "26101"
},
{
"name": "Shell",
"bytes": "4400"
},
{
"name": "TypeScript",
"bytes": "76442"
}
],
"symlink_target": ""
} |
import time #For sleeping
import datetime #For logging timestamps
import os #For reading contents of directories, symlinks and similar
import json #For parsing the config file
import subprocess #For calling external programs, such as "mount"
import signal #For reloading daemon
import re #For "label-regex" option handling
import sys #For stdout and stderr redirection
import threading #For parallel partition processing
from copy import deepcopy #For fixing a bug with copying
import shlex #For fstab/mtab/whatever parsing
import pyrtitions
__version__ = "1.0.0"
config_file = "/etc/pautomount.conf"
#Some globals
config = {}
previous_partitions = []
processed_partitions = []
#These variables are those that affect the work of the daemon. They have default values now,
#but those are overridden by values in the config file.
main_mount_dir = "/media/" #Main directory for relative mountpoints in config and generating mountpoints
default_mount_option = "rw" #Option that is used if drive hasn't got any special options
logfile = "/var/log/pautomount.log"
debug = False #Makes output more verbose
super_debug = False #MORE VERBOSE!
interval = 3 #Interval between work cycles in seconds
noexecute = False #Forbids executing things, logs command to be executed instead
label_char_filter = True #Filters every disk label for every non-ascii character
def log(data):
"""Writes data into a logfile adding a timestamp """
f = open(logfile, "a")
timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
f.write(timestamp+" "+str(data)+"\n")
f.close()
def export_globals():
log("Exporting globals from config file")
for variable in config["globals"].keys():
if debug:
log("Exporting variable "+variable+" from config")
globals()[variable] = config["globals"][variable]
def normalize_config(config):
#finished
"""Check config file's structure and contents for everything that can make the daemon fail, spits out warnings about inconsistent entries and deletes them from the daemon's dictionary"""
#Should there be some problems with the logfile, log to the /var/log/daemon.log
#Well, an empty file with curly braces should do. But Python has its own way of handling a try to get a value from a dict by a non-existent key.
#Precisely, it returns an exception, and to catch this, we need to wrap in try:except many blocks.
#I think that the most efficient way is adding the basic keys (config, exceptions, rules and default section) if they don't exist in the actual dictionary.
#Checking everything else is already handled by all the other functions.
categories = {"globals":{}, "exceptions":[], "rules":[], "default":{}}
for category in categories.keys():
if category not in config.keys():
config[category] = categories[category]
#Now check if logfile exists. If it doesn't, we have to create it.
try:
logfile_var = config["globals"]["logfile"]
except KeyError:
logfile_var = "/var/log/pautomount.log"
if not os.path.exists(logfile_var):
try:
			open(logfile_var, "a").close() #os has no touch(); create the file explicitly
		except (IOError, OSError):
			log("Logfile creation in path "+logfile_var+" not permitted. Falling back to default.")
logfile_var = "/var/log/daemon.log"
config["globals"]["logfile"] = logfile_var
#OK. We have a logfile that should work. I suppose we can just redirect stderr and let all
#the uncaught exception output appear there.
#Checks will be added to this function in case lack of check can mean something dreadful.
return config
def log_to_stdout(message):
timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(timestamp+" "+str(message))
def compare(arr1, arr2):
"""Compares two arrays - arr1 and arr2. Returns tuple (items that lack from arr2, items that lack from arr1)"""
attached, detached = [item for item in arr1 if item not in arr2], [item for item in arr2 if item not in arr1]
return attached, detached
def execute(*args):
"""Comfortable subprocess wrapper to call external programs"""
if debug:
log("Executing: "+str(args))
if noexecute:
log("'noexecute' turned on, not doing anything, arguments:")
log(str(args))
result = [0, ""] #Totally faking it
else:
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT, shell=True)
result = [0, output]
except subprocess.CalledProcessError as e:
result = [int(e.returncode), e.output]
if debug:
log("Exit code: "+str(result[0])+", output: "+result[1])
return result
def add_processed_partition_entry(part_info, rule):
#This function adds data to processed_partitions dictionary
#Useful mainly on ejects for getting knowledge which directory to `umount`
global processed_partitions
part_info = deepcopy(part_info) #Who knows, maybe this is exactly a place for a bug I've fought with before
if "umount" in rule.keys(): #Saving umount action for later
part_info["umount"] = rule["umount"]
else:
part_info["umount"] = None
processed_partitions.append(part_info)
def remove_processed_partition_entry(part_info):
#When partition gets ejected, we also need to remove any signs of its existence from processed_partitions
global processed_partitions
for entry in deepcopy(processed_partitions):
if entry["uuid"] == part_info["uuid"]: #Checking by uuid because it's 100% working
processed_partitions.remove(entry)
def filter_virtual_devices(current_entries):
virtual_devices = pyrtitions.get_virtual_devices()
return [entry for entry in current_entries if os.path.basename(entry["path"]) not in virtual_devices]
def mark_mounted_partitions(current_entries):
mounted_partitions = pyrtitions.get_mounts()
mounted_devices = list(mounted_partitions.keys())
for entry in current_entries:
if entry["path"] in mounted_devices:
entry["mounted"] = True
else:
entry["mounted"] = False
return current_entries
def read_config():
try:
log("Opening config file - "+config_file)
f = open(config_file, 'r')
d = json.load(f)
f.close()
except (IOError, ValueError) as e:
#if the config file doesn't exist - there's no need in running this daemon.
log("Exception while opening config file")
log(str(e))
sys.exit(1)
return d
def complies_to_rule(partition, rule):
"""Check if the partition parameters comply to the rule given"""
#This function exists because we can have many different conditions when partition parameters apply to the rule
#First one is when UUIDs match
if "uuid" in rule.keys() and partition["uuid"] == rule["uuid"]:
if debug:
log("Partition complies to UUID rule")
return True
#Latter two only apply if partition has a label
elif "label" in partition.keys():
#Second is when there's some label in the rule and it matches to the label of the partition
if "label" in rule.keys() and partition["label"] == rule["label"]:
if debug:
log("Partition complies to label rule")
return True
#Third is when the rule has option "label-regex"
#That means we take this regex, compile it and check
elif "label_regex" in rule.keys():
pattern = re.compile(rule["label_regex"])
if pattern.match(partition["label"]):
if debug:
log("Partition complies to label_regex rule")
return True
else:
return False
else: #No more options to check
return False
else:
return False
def mount_wrapper(partition, mount_rule):
"""Wrapper around mount(), takes care of "mount lists" function"""
#Could be possibly made as a decorator...
if type(mount_rule) != list:
mount_rule = [mount_rule]
else:
log("Received mount list with "+len(mount_rule)+"elements")
mountpoint = None
for rule in mount_rule:
result = mount(partition, rule)
if not mountpoint and result: #Mountpoint is not set, result of mount() is not negative
#First mountpoint which is used is to be returned by mount_wrapper() as a mountpoint
mountpoint = result
return mountpoint
def mount(partition, mount_rule):
"""Mount function, wrapper around execute()"""
if not mount_rule:
return None #Don't need to react at all
#log(mount_rule.keys())
if type(mount_rule) != dict or "mountpoint" not in mount_rule.keys():
mountpoint = pyrtitions.generate_mountpoint(partition)
else:
mountpoint = mount_rule["mountpoint"]
mountpoint = return_absolute_mountpoint(mountpoint)
if type(mount_rule) != dict or "options" not in mount_rule.keys():
options = default_mount_option
else:
options = mount_rule["options"]
try:
ensure_path_exists(mountpoint)
except:
log("Directory creation failed, path: "+mountpoint)
raise Exception #Path creation failed - throw exception...
#TODO - change exception type
#Now kiss!^W^W^W^W execute!
log("Trying to mount partition "+partition["uuid"]+" on path "+mountpoint)
command = "mount "+partition["path"]+" "+mountpoint+" -o "+options
output = execute(command)
if output[0] != 0:
log("Mount failed. Exit status: "+str(output[0]))
log("Output: "+output[1])
return None
else:
log("Partition "+partition["uuid"]+" successfully mounted")
return mountpoint
def execute_script_wrapper(script_path, part_info=None):
#script_path might as well be list, so we need to make a workaround here
if type(script_path) != list:
script_path = list([script_path])
for script in script_path:
execute_custom_script(script, part_info=part_info)
def execute_custom_script(script_path, part_info=None):
"""Function to execute arbitrary script - main function is to arrange arguments in a correct order"""
#First of all, there are two ways to call this function.
#If you don't supply part_info, it just calls some command without options
#If you supply part_info, it calls that command giving info about partition as arguments
#Second occasion is handy for custom scripts
#Okay, we have some arguments and options
#Arguments are partition's block device path and uuid
#Options are... Mountpoint and label, for example. Can't think of many now.
if part_info:
device = part_info["path"]
uuid = part_info["uuid"]
if "mountpoint" in part_info.keys():
mountpoint = part_info["mountpoint"] #Might need to be escaped as may contain spaces and so on
else:
mountpoint = "None"
uuid = part_info["uuid"]
if "label" in part_info.keys():
label = part_info["label"] #Might need to be escaped as well
else:
label = "None"
#Script will be called like '/path/to/script /dev/sda1 U1U2-I3D4 /media/4GB-Flash Flashdrive'
command = script_path+" "+device+" "+uuid+" "+mountpoint+" "+label
else:
command = script_path
log("Calling external script: "+command)
output = execute(command)
if output[0] != 0:
log("Calling external script failed. Exit status: "+str(output[0]))
log("Output: "+output[1])
else:
log("Calling external script succeeded.")
def return_absolute_mountpoint(path):
"""We can specify both relative and absolute path in config file. This function adds main_mount_dir to all relative paths."""
if os.path.isabs(path):
path = path
else:
path = os.path.join(main_mount_dir, path)
return path
def ensure_path_exists(path):
if not os.path.isdir(path):
log("Mountpoint does not seem to exist. Quickly fixing this...")
os.makedirs(path)
return True
def main_loop():
global previous_partitions
current_partitions = pyrtitions.get_uuids_and_labels()
current_partitions = filter_virtual_devices(current_partitions)
attached, detached = compare(current_partitions, previous_partitions)
attached = deepcopy(attached) #Fixing a bug with compare() when modifying elements in attached() led to previous_partitions being modified
detached = deepcopy(detached) #Preventing a bug in the future
#We need to copy "current_partitions" into "previous_partitions" now
#If current_partition is modified, it may lead to attempt to reattach partition in the next step
previous_partitions = current_partitions
if attached:
log("Found "+str(len(attached))+" attached partition(s)")
if debug:
log(str(attached))
if detached:
log("Found "+str(len(detached))+" detached partition(s)")
if debug:
log(str(detached))
#Start processing every attached drive
attached = mark_mounted_partitions(attached)
for partition in attached:
if partition["mounted"]: #This is for ignoring partitions that have been mounted when daemon starts but aren't in processed_partition dictionary - such as root partition and other partitions in fstab
log("Partition already mounted, not doing anything")
continue
t = threading.Thread(target = process_attached_partition, args = tuple([partition])) #tuple([]) is a fix for a problem with *args that is totally ununderstandable for me and I don't even want to dig through this shit. It doesn't accept a tuple, but accepts tuple(list). So - this fix isn't dirty, just quick =)
t.daemon = True
t.start()
for partition in detached:
t = threading.Thread(target = process_detached_partition, args = tuple([partition])) #tuple([]) is a fix for a problem with *args that is totally ununderstandable for me and I don't even want to dig through this shit. It doesn't accept a tuple, but accepts tuple(list). So - this fix isn't dirty, just quick =)
t.daemon = True
t.start()
pass
if super_debug:
log(str(current_partitions))
if debug:
log("Sleeping...")
pass
def process_attached_partition(*args, **kwargs):
partition = args[0]
log("Processing attached drive with UUID "+partition["uuid"])
action_taken = False
for exception in config["exceptions"]:
if complies_to_rule(partition, exception):
#Well, we don't need to do anything
#Other than
action_taken = True
if debug:
log("Partition complies to exception rule: "+str(exception))
else:
log("Partition "+partition["uuid"]+" complies to exception rule.")
break
for rule in config["rules"]:
if complies_to_rule(partition, rule) and action_taken == False:
partition["mountpoint"] = None
if "mount" in rule.keys() and rule["mount"]:
partition["mountpoint"] = mount_wrapper(partition, rule["mount"])
if "command" in rule.keys() and rule["command"]:
execute_script_wrapper(rule["command"])
if "script" in rule.keys() and rule["script"]:
execute_script_wrapper(rule["script"], part_info=partition)
add_processed_partition_entry(partition, rule)
action_taken = True
if debug:
log("Partition complies to rule: "+str(rule))
else:
log("Partition "+partition["uuid"]+" complies to rule.")
if action_taken == False:
#And now for the defaults
log("No rule that suits this partition, taking actions set by default.")
default = config["default"]
partition["mountpoint"] = None
if "mount" in default.keys() and default["mount"]:
partition["mountpoint"] = mount_wrapper(partition, default["mount"])
if "command" in default.keys() and default["command"]:
execute_script_wrapper(default["command"])
if "script" in default.keys() and default["script"]:
execute_script_wrapper(default["script"], part_info=partition)
add_processed_partition_entry(partition, default)
#That seems it, by this time action is already taken/exception is made.
return #No need in return value.
def process_detached_partition(*args, **kwargs):
part_info = args[0]
log("Processing detached drive with UUID "+part_info["uuid"])
for partition in processed_partitions:
if partition["uuid"] == part_info["uuid"]:
if "umount"in partition.keys() and partition["umount"]:
#The same command list support, just executing all the commands one by one
execute_script_wrapper(partition["umount"])
if "mountpoint" in partition.keys() and partition["mountpoint"]:
#Unmounting the mountpoint where device was mounted - just in case
				exit_status = None #not-yet-run marker so the umount loop below executes at least once
while exit_status != 0:
exit_status = execute("umount "+partition["mountpoint"]+"")[0]
else:
continue
remove_processed_partition_entry(part_info)
def set_output():
"""This function looks for a certain command-line option presence and sets stdout and stderr accordingly."""
global log
option = "-e"
if option in [element.strip(" ") for element in sys.argv]:
#Flag for debugging to make pautomount output stderr to console
log = log_to_stdout #Reassigns logging function
else:
f = open(logfile, "a")
sys.stderr = f
sys.stdout = f
def load_config():
global config
config = read_config()
config = normalize_config(config)
export_globals()
log("Config loaded and parsed successfully")
def reload(signum, frame):
#Is just a wrapper for load_config
#Just in case we will need more sophisticated signal processing
log("Reloading on external signal")
load_config()
def main():
signal.signal(signal.SIGHUP, reload) #Makes daemon reloading possible
set_output() #Decides where to output logging messages
load_config() #Manages config - loads it, cleans it up and exports globals
if super_debug:
		globals()["debug"] = True #a plain assignment would only create a local variable here
while True:
main_loop() #Starts daemon
time.sleep(interval)
if __name__ == "__main__":
main()
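# Illustrative note: a minimal, hypothetical /etc/pautomount.conf using the keys
# this daemon understands ("globals", "exceptions", "rules", "default"); every
# value below is an assumption, not a shipped default:
#
#     {
#         "globals": {"main_mount_dir": "/media/", "interval": 3, "debug": true},
#         "exceptions": [{"uuid": "1234-5678"}],
#         "rules": [
#             {"label_regex": "BACKUP.*",
#              "mount": {"mountpoint": "backup", "options": "ro"},
#              "script": "/usr/local/bin/on_backup_attached.sh"}
#         ],
#         "default": {"mount": true}
#     }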
| {
"content_hash": "9aaa15e075045415152c19e549e5c1af",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 318,
"avg_line_length": 44.85990338164251,
"alnum_prop": 0.6564182640534137,
"repo_name": "CRImier/pautomount",
"id": "dbe7c5e31a3d3480d0bab1395ef5bf7d6391a2d7",
"size": "18595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pautomount.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19875"
},
{
"name": "Shell",
"bytes": "2507"
}
],
"symlink_target": ""
} |
"""Utilities for reading and writing sentences in dragnn."""
import tensorflow as tf
from syntaxnet.ops import gen_parser_ops
class ConllSentenceReader(object):
"""A reader for conll files, with optional projectivizing."""
def __init__(self, filepath, batch_size=32,
projectivize=False, morph_to_pos=False):
self._graph = tf.Graph()
self._session = tf.Session(graph=self._graph)
task_context_str = """
input {
name: 'documents'
record_format: 'conll-sentence'
Part {
file_pattern: '%s'
}
}""" % filepath
if morph_to_pos:
task_context_str += """
Parameter {
name: "join_category_to_pos"
value: "true"
}
Parameter {
name: "add_pos_as_attribute"
value: "true"
}
Parameter {
name: "serialize_morph_to_pos"
value: "true"
}
"""
with self._graph.as_default():
self._source, self._is_last = gen_parser_ops.document_source(
task_context_str=task_context_str, batch_size=batch_size)
self._source = gen_parser_ops.well_formed_filter(self._source)
if projectivize:
self._source = gen_parser_ops.projectivize_filter(self._source)
def read(self):
"""Reads a single batch of sentences."""
if self._session:
sentences, is_last = self._session.run([self._source, self._is_last])
if is_last:
self._session.close()
self._session = None
else:
sentences, is_last = [], True
return sentences, is_last
def corpus(self):
"""Reads the entire corpus, and returns in a list."""
tf.logging.info('Reading corpus...')
corpus = []
while True:
sentences, is_last = self.read()
corpus.extend(sentences)
if is_last:
break
tf.logging.info('Read %d sentences.' % len(corpus))
return corpus
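# Illustrative usage sketch (the corpus path is a placeholder):
#
#     reader = ConllSentenceReader('/tmp/train.conll', batch_size=64, projectivize=True)
#     corpus = reader.corpus()              # all serialized sentences at once
#     # or stream it batch by batch:
#     # sentences, is_last = reader.read()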
| {
"content_hash": "a693711ec091a9b6b3cbf34546db33a6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 75,
"avg_line_length": 30.734375,
"alnum_prop": 0.5693950177935944,
"repo_name": "hang-qi/models",
"id": "444e621cfab0066f333aef84fca33d8f78d5318d",
"size": "1967",
"binary": false,
"copies": "2",
"ref": "refs/heads/hemingway",
"path": "syntaxnet/dragnn/python/sentence_io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "662"
},
{
"name": "C++",
"bytes": "1167920"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "61098"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "24249"
},
{
"name": "Python",
"bytes": "2633576"
},
{
"name": "Shell",
"bytes": "45143"
}
],
"symlink_target": ""
} |
import json
from typing import Optional
from rivr.http import Request, Response
class ProblemResponse(Response):
def __init__(self, status: int, title: Optional[str] = None):
content = json.dumps({'title': title})
super(ProblemResponse, self).__init__(
content=content, status=status, content_type='application/problem+json'
)
class RESTResponse(Response):
def __init__(self, request: Request, payload, status: Optional[int] = None):
content = json.dumps(payload)
content_type = 'application/json'
super(RESTResponse, self).__init__(content, status, content_type)
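# Illustrative usage sketch (assumes `request` is a rivr Request; payload and status
# values are placeholders):
#
#     ok = RESTResponse(request, {'device': 'abc123'}, status=201)
#     err = ProblemResponse(404, 'Device not found')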
| {
"content_hash": "7955618ddd7c1e5c3c08a69574b54efd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 83,
"avg_line_length": 31.9,
"alnum_prop": 0.664576802507837,
"repo_name": "cocodelabs/api.palaverapp.com",
"id": "a57348ed2ed730600dced6953aa01be3877c6f94",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "palaverapi/responses.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "API Blueprint",
"bytes": "2153"
},
{
"name": "Python",
"bytes": "51073"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
} |
import lmdb
import os
import tensorflow as tf
import random
import numpy as np
import cv2
def ascii2Label(ascii):
	# maps '0'-'9' to 0-9 and ASCII letters (case-insensitive) to 10-35
	if ascii >= 48 and ascii <=57:
		c = ascii - 48
	elif ascii >= 65 and ascii <=90:
		c = ascii - 65 +10
	elif ascii >=97 and ascii <=122:
		c = ascii - 97 +10
	else:
		raise ValueError("unsupported character code: %d" % ascii) #only digits and ASCII letters are valid labels
	return c
def str2intLable(strs):
assert type(strs) is list
nums = len(strs)
maxLength = 0
indices = []
values = []
seqLengths = []
for i in range(nums):
length = len(strs[i])
if length > maxLength:
maxLength = length
for j in range(length):
indices.append([i, j])
values.append(ascii2Label(ord(strs[i][j])))
seqLengths.append(length)
dense_shape = [nums, maxLength]
indices = np.asarray(indices, dtype=np.int32)
values = np.asarray(values, dtype=np.int32)
dense_shape = np.asarray(dense_shape, dtype=np.int32)
return (indices, values, dense_shape), seqLengths
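# Illustrative note: str2intLable builds the (indices, values, dense_shape) triple
# expected by tf.SparseTensor. For strs = ["ab", "1"] the classes are a->10, b->11,
# '1'->1, giving:
#     indices     = [[0, 0], [0, 1], [1, 0]]
#     values      = [10, 11, 1]
#     dense_shape = [2, 2]
#     seqLengths  = [2, 1]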
class DatasetLmdb:
def __init__(self, lmdbPath):
self.env = lmdb.open(lmdbPath, map_size=1099511627776)
with self.env.begin() as txn:
self.nSamples = int(txn.get('num-samples'))
def getNumSamples(self):
return self.nSamples
def nextBatch(self, batchSize):
imgW = 100
imgH = 32
randomIndex = random.sample(range(1, self.nSamples), batchSize)
imageList = []
labelList = []
imageKeyList = []
images = []
errorCounter = 0
with self.env.begin() as txn:
for i in range(batchSize):
idx = randomIndex[i]
imageKey = 'image-%09d' % idx
labelKey = 'label-%09d' % idx
imageBin = txn.get(imageKey)
labelBin = txn.get(labelKey)
imageList.append(imageBin)
labelList.append(labelBin)
imageKeyList.append(imageKey)
for i in range(batchSize):
imageBin = imageList[i]
imageBuf = np.fromstring(imageBin, dtype=np.uint8)
decompressedImg = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
# print decompressedImg.shape
resized = cv2.resize(decompressedImg, (imgW, imgH))
# print resized.shape
images.append(resized)
images = np.asarray(images)
labels, seqLengths = str2intLable(labelList)
return images, labels, seqLengths
class SynthLmdb:
def __init__(self, lmdbPath,dataPath):
self.env = lmdb.open(lmdbPath, map_size=1099511627776)
with self.env.begin() as txn:
self.nSamples = int(txn.get('num-samples'))
self.dataPath = dataPath
def getNumSamples(self):
return self.nSamples
def nextBatch(self, batchSize):
imgW = 100
imgH = 32
randomIndex = random.sample(range(1, self.nSamples), batchSize)
imageList = []
labelList = []
images = []
with self.env.begin() as txn:
for i in range(batchSize):
idx = randomIndex[i]
imageKey = '%08d' % idx
imagePath = txn.get(imageKey)
label = os.path.splitext(os.path.split(imagePath)[-1])[0].split('_')[1]
imageList.append(imagePath)
labelList.append(label)
for i in range(batchSize):
decompressedImg = cv2.imread(os.path.join(self.dataPath, imageList[i]), cv2.IMREAD_GRAYSCALE)
# print decompressedImg.shape
resized = cv2.resize(decompressedImg, (imgW, imgH))
# print resized.shape
images.append(resized)
images = np.asarray(images)
labels, seqLengths = str2intLable(labelList)
return images, labels, seqLengths
if __name__ == '__main__':
# db = SynthLmdb("../data/Synth/test_data", "../data/Synth")
db = DatasetLmdb("../data/IIIT5K")
batches, labels, seqLengths = db.nextBatch(10)
import utility
pred = utility.convertSparseArrayToStrs(labels)
print batches.shape, pred, seqLengths, labels[2]
# for b in batches:
# print b.shape
# cv2.imshow("output", b)
# cv2.waitKey(0) | {
"content_hash": "4ea52f08f2f8ca72238c154c0cedc300",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 96,
"avg_line_length": 28.580645161290324,
"alnum_prop": 0.6887697516930023,
"repo_name": "wcy940418/CRNN-end-to-end",
"id": "ba80d7cbd2c77aa56336f132f12b4407a9d76bb9",
"size": "3544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26297"
}
],
"symlink_target": ""
} |
import logging
import queue
import socket
import sys
import threading
from . import Client
from . import Room
import shelve
logging.basicConfig(filename='logs.log',
level=logging.DEBUG,
format='%(levelname)s %(asctime)-15s %(message)s')
class Server(threading.Thread):
def __init__(self):
super().__init__()
self.host = ''
self.port = 1234
self.backlog = 5
self.size = 1024
self.server = None
self.waiting_room = None
self.running = False
self.last_client_id = 0
self.clients = []
self.new_clients = queue.Queue()
with shelve.open('database', 'c') as db:
if 'PLAYERS' not in db.keys():
db['PLAYERS'] = {}
self.database = db['PLAYERS']
def open_socket(self):
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((self.host, self.port))
self.server.listen(5)
except socket.error as error:
            if self.server:
                self.server.close()
            logging.warning("Could not open socket: " + str(error))
sys.exit(1)
def run(self):
self.open_socket()
self.running = True
logging.info("Server started")
self.waiting_room = Room.RoomController(self.new_clients)
self.waiting_room.start()
while self.running:
try:
connection = self.server.accept()
client = Client.Client(connection, self.last_client_id)
self.last_client_id += 1
client.start()
self.clients.append(client)
self.new_clients.put(client)
_, address = connection
logging.info('Connected ' + str(address[0]) + ':' + str(address[1]))
except socket.error as error:
logging.warning("Connection ERROR " + str(error))
def stop(self):
self.running = False
for client in self.clients:
client.client.close()
client.running = False
client.join()
for room in self.waiting_room.rooms:
room.running = False
room.join()
self.server.close()
self.waiting_room.running = False
self.waiting_room.join()
self.join()
logging.info("Server stopped")
class ServerController:
def __init__(self):
self.server = None
def run(self):
running = True
while running:
command = input('$: ')
if command.lower() == 'start':
self.start_server()
elif command.lower() == 'stop':
self.stop_server()
elif command.lower() == 'exit':
self.close()
elif command.lower() == 'clients':
self.print_clients()
elif command.lower() == 'database':
self.print_database()
else:
print("Not known command")
def start_server(self):
if self.server is None:
self.server = Server()
self.server.start()
print("Server started")
def stop_server(self):
if self.server is not None:
self.server.stop()
self.server.join()
self.server = None
print("Server stopped")
def close(self):
self.stop_server()
sys.exit(0)
def print_clients(self):
print(self.server.clients)
def print_database(self):
print(self.server.database)
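# Illustrative usage sketch (assumes the module is run as part of its package so the
# relative imports of Client and Room resolve):
#
#     if __name__ == '__main__':
#         controller = ServerController()
#         controller.run()   # commands: start, stop, clients, database, exit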
| {
"content_hash": "cf04da40ea9cc9ffd9e509d91cb0444f",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 84,
"avg_line_length": 25.97241379310345,
"alnum_prop": 0.5270844397238449,
"repo_name": "AThom0x7cc/SimpleMultiplayerGame",
"id": "47ba00b0211f4ef7f988e6442c615bd13dc488f3",
"size": "3766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/Server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17866"
}
],
"symlink_target": ""
} |
import unittest
from flowy.exception import SuspendTask, TaskError, TaskTimedout
class ResultTest(unittest.TestCase):
def test_placeholder(self):
from flowy.result import Placeholder
res = Placeholder()
self.assertRaises(SuspendTask, res.result)
def test_error(self):
from flowy.result import Error
res = Error('reason')
self.assertRaisesRegexp(TaskError, 'reason', res.result)
def test_timeout(self):
from flowy.result import Timeout
res = Timeout()
self.assertRaises(TaskTimedout, res.result)
def test_result(self):
from flowy.result import Result
res = Result('reason')
self.assertEquals('reason', res.result())
| {
"content_hash": "6ab55e6e28dc0b6c84b0d48e41177a51",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 64,
"avg_line_length": 25.413793103448278,
"alnum_prop": 0.6648575305291723,
"repo_name": "severb/flowy-website",
"id": "93d516c2fb38eff4b2b3187c80ed9efea68f005a",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowy_module/flowy/tests/test_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "78754"
},
{
"name": "JavaScript",
"bytes": "710652"
},
{
"name": "Python",
"bytes": "3013303"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
} |
import threading
import pytest
from tartpy.runtime import behavior, SimpleRuntime
from tartpy.eventloop import EventLoop
from tartpy.tools import Wait
runtime = SimpleRuntime()
def test_runtime_error():
err = False
class TestRuntime(SimpleRuntime):
def throw(self, message):
nonlocal err
err = True
@behavior
def beh(self, msg):
1/0
test_rt = TestRuntime()
x = test_rt.create(beh)
x << 5
EventLoop().run_once()
assert err is True
def test_self_create():
result = None
@behavior
def foo(self, msg):
nonlocal result
result = msg
@behavior
def beh(self, msg):
a = self.create(foo)
a << msg
x = runtime.create(beh)
x << 5
EventLoop().run_once()
assert result == 5
def test_receive_message():
result = None
@behavior
def beh(self, msg):
nonlocal result
result = msg
a = runtime.create(beh)
a << 5
EventLoop().run_once()
assert result == 5
def test_create_with_args():
result = None
@behavior
def beh(arg, self, msg):
nonlocal result
result = arg
a = runtime.create(beh, True)
a << 0
EventLoop().run_once()
assert result is True
def test_one_shot():
sink_beh_done = False
destination_beh_done = False
message = None
@behavior
def one_shot_beh(destination, self, msg):
destination << msg
self.become(sink_beh)
@behavior
def sink_beh(self, msg):
assert msg == 'second'
nonlocal sink_beh_done
sink_beh_done = True
@behavior
def destination_beh(self, msg):
nonlocal message, destination_beh_done
message = msg
destination_beh_done = True
destination = runtime.create(destination_beh)
one_shot = runtime.create(one_shot_beh, destination)
one_shot << 'first'
one_shot << 'second'
EventLoop().run_once()
assert message == 'first'
assert sink_beh_done and destination_beh_done
def test_serial():
first_msg = second_msg = third_msg = None
first_behavior = second_behavior = third_behavior = None
first = second = third = None
@behavior
def first_beh(self, msg):
self.become(second_beh)
nonlocal first_msg, first_behavior, first
first_msg = msg
first_behavior = record()
first = True
@behavior
def second_beh(self, msg):
self.become(third_beh)
nonlocal second_msg, second_behavior, second
second_msg = msg
second_behavior = record()
second = True
@behavior
def third_beh(self, msg):
nonlocal third_msg, third_behavior
third_msg = msg
third_behavior = record()
def record():
return bool(first), bool(second), bool(third)
serial = runtime.create(first_beh)
serial << 'foo'
serial << 'foo'
serial << 'foo'
EventLoop().run_once()
assert first_msg == 'foo' and second_msg == 'foo' and third_msg == 'foo'
assert first_behavior == (False, False, False)
assert second_behavior == (True, False, False)
assert third_behavior == (True, True, False)
def test_runtime_event_loop():
@behavior
def null_beh(self, msg):
pass
success = False
def f():
nonlocal success
null = runtime.create(null_beh)
success = True
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert success
def test_wait():
w = Wait()
wait = runtime.create(w.wait_beh)
wait << 'foo'
EventLoop().run_once()
result = w.join()
assert result == 'foo'
def test_wait_timeout_zero():
w = Wait(timeout=0)
wait = runtime.create(w.wait_beh)
EventLoop().run_once()
result = w.join()
assert result is None
w = Wait(timeout=0)
wait = runtime.create(w.wait_beh)
wait << 'foo'
EventLoop().run_once()
result = w.join()
assert result == 'foo'
def test_wait_timeout_nonzero():
w = Wait(timeout=0.5)
wait = runtime.create(w.wait_beh)
result = w.join()
assert result is None
def test_attribute_access():
result = None
@behavior
def beh(self, message):
nonlocal result
result = message.result
actor = runtime.create(beh)
actor << {'foo': 4, 'result': True}
EventLoop().run_once()
assert result
def test_integer_message_attribute():
result = None
@behavior
def beh(self, message):
nonlocal result
result = message
actor = runtime.create(beh)
actor << 2
EventLoop().run_once()
assert isinstance(result, int) and result == 2
| {
"content_hash": "4b3e8df4c1c417fc79e2643228ead13d",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 76,
"avg_line_length": 20.82894736842105,
"alnum_prop": 0.5902295220046325,
"repo_name": "waltermoreira/tartpy",
"id": "7f0bceba052848ab9c192e1208d2c85d74e4449b",
"size": "4749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tartpy/tests/test_actor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34631"
}
],
"symlink_target": ""
} |
import os
import requests as r
from env2config.interface import LineOriented
DEFAULT_URL = \
"https://raw.githubusercontent.com/apache/kafka/{version}/config/{filename}"
class KafkaDefinition(LineOriented):
service_name = 'kafka'
def get_tags(self):
return {
'scala': self.tags.get('scala', '2.11')
}
def default_configs(self):
version = self.version
def get_default(filename):
url = DEFAULT_URL.format(version=version, filename=filename)
response = r.get(url)
if response.status_code == 200:
text = response.text
return text
else:
raise ValueError(version)
return {
'consumer.properties': lambda: get_default('consumer.properties'),
'log4j.properties': lambda: get_default('log4j.properties'),
'producer.properties': lambda: get_default('producer.properties'),
'server.properties': lambda: get_default('server.properties'),
'zookeeper.properties': lambda: get_default('zookeeper.properties'),
}
def config_mapping(self):
scala_version = self.get_tags()['scala']
root = '/opt/kafka_{scala_version}-{version}'.format(
scala_version=scala_version,
version=self.version
)
mapping = dict(
(filename, os.path.join(root, 'config', filename))
for filename in self.default_configs()
)
return mapping
def config_multiplex(self, config_name):
split_point = config_name.find('_')
prefix = config_name[:split_point]
rest = config_name[split_point + 1:]
config_file = prefix.lower() + '.properties'
return config_file, rest
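    # Illustrative example (hypothetical variable name): an environment
    # variable SERVER_LOG_DIRS multiplexes to ('server.properties', 'LOG_DIRS'),
    # and convert_name('LOG_DIRS') then yields 'log.dirs'.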
def ignore_env_names(self):
return [
'KAFKA_HOME',
]
def convert_name(self, config_name):
parts = config_name.split('_')
formatted = '.'.join(p.lower() for p in parts)
return formatted
def convert_value(self, config_value):
return config_value
def match_line(self, line, config_name):
content = line.replace('#', '').strip()
matches = content.split('=')[0] == config_name
return matches
def inject_line(self, old_line, config_name, config_value):
new_line = '{0}={1}\n'.format(config_name, config_value)
return new_line
def comment_line(self, content):
return '# ' + content + '\n'
| {
"content_hash": "37894c74ecf29c7b62990668a831dd79",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 29.337209302325583,
"alnum_prop": 0.5814506539833532,
"repo_name": "dacjames/env2config",
"id": "11b83e0a2934fd3303ae5d57496490535bcf6f26",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env2config/services/kafka.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33725"
},
{
"name": "Shell",
"bytes": "57"
}
],
"symlink_target": ""
} |
import sys
import os
sys.setrecursionlimit(15000)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx_autodoc_typehints",
"sphinx.ext.viewcode",
"sphinx.ext.graphviz",
"sphinx.ext.inheritance_diagram",
"sphinx_copybutton",
"myst_parser",
]
# conf for copybutton
copybutton_prompt_text = ">>> "
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "SpiceyPy"
copyright = "2014-2022, Andrew Annex"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "5.1.2"
# The full version, including alpha/beta/rc tags.
release = "5.1.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"navigation_depth": 4}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "SpiceyPy Docs"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "SpiceyPydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '\setcounter{tocdepth}{2000}'
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "SpiceyPy.tex", "SpiceyPy Documentation", "Andrew Annex", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "spiceypy", "SpiceyPy Documentation", ["Andrew Annex"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"spiceypy",
"SpiceyPy Documentation",
"Andrew Annex",
"SpiceyPy",
"The NASA JPL NAIF SPICE toolkit wrapper written in Python.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = "SpiceyPy"
epub_author = "Andrew Annex"
epub_publisher = "Andrew Annex"
epub_copyright = "2014-2022, Andrew Annex"
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'SpiceyPy'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
| {
"content_hash": "7366be3839641fe14e44aa6cdc4621f7",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 82,
"avg_line_length": 31.298780487804876,
"alnum_prop": 0.6968634326904345,
"repo_name": "AndrewAnnex/SpiceyPy",
"id": "a81978ab98cc2acf8d082bcbe17490b7825ea2a2",
"size": "10710",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1117"
},
{
"name": "Python",
"bytes": "1132264"
},
{
"name": "TeX",
"bytes": "7777"
}
],
"symlink_target": ""
} |
from flask import Flask, redirect, url_for, request, Response, render_template
import wikipedia
app = Flask(__name__)
@app.route('/')
def student():
return render_template('index.html')
@app.route('/search/<name>')
def success(name):
    return '%s' % name
@app.route('/search',methods = ['POST', 'GET'])
def login():
if request.method == 'POST':
message_body = request.form['searchkey']
replyText = message_body
# print search_item['key']
print "\n\nSearch input: ", message_body
replyText,keyT = getReply(message_body)
# print keyT
# return redirect(url_for('success',name = 'aaa',message = 'csb'))
return render_template('output.html', answer = replyText, key = keyT)
# return '<p>' + 'Thanks for using the App, below is the response: '+ '</p>' + '<p>' + replyText + '</p>' + '<br>' + \
# "<b><a href = '/'>click here to go back to Search</a></b>"
# return Response(replyText, mimetype='text/plain')
else:
message_body = request.args.get('searchkey')
replyText,keyT = getReply(message_body)
        return render_template('output.html', answer = replyText, key = keyT)
def removeHead(fromThis, removeThis):
if fromThis.endswith(removeThis):
fromThis = fromThis[:-len(removeThis)].strip()
elif fromThis.startswith(removeThis):
fromThis = fromThis[len(removeThis):].strip()
return fromThis
# Function to formulate a response based on message input.
def getReply(message):
key = "None"
# Make the message lower case and without spaces on the end for easier handling
message = message.lower().strip()
# This is the variable where we will store our response
answer = ""
# if "weather" in message:
# answer = 'get the weather using a weather API'
# is the keyword "wolfram" in the message? Ex: "wolfram integral of x + 1"
# if "wolfram" in message:
# answer = 'get a response from the Wolfram Alpha API'
# # is the keyword "wiki" in the message? Ex: "wiki donald trump"
# elif "wiki" in message:
# answer = 'get a response from the Wikipedia API'
# is the keyword "wiki" in the message? Ex: "wiki donald trump"
if "wiki" in message:
key = "wiki"
# remove the keyword "wiki" from the message
message = removeHead(message, "wiki")
# Get the wikipedia summary for the request
try:
# Get the summary off wikipedia
answer = wikipedia.summary(message)
except:
# handle errors or non specificity errors (ex: there are many people
# named donald)
answer = "Request was not found using wiki. Be more specific?"
# is the keyword 'some_keyword' in the message? You can create your own custom
# requests! Ex: 'schedule Monday'
# elif 'some_keyword' in message:
# answer = 'some response'
# the message contains no keyword. Display a help prompt to identify possible
# commands
else:
answer = "\n Welcome! These are the commands you may use: \nWOLFRAM \"wolframalpha request\" \nWIKI \"wikipedia request\"\nWEATHER \"place\"\nSOME_KEYWORD \"some custom request\"\n"
# Twilio can not send messages over 1600 characters in one message. Wikipedia
# summaries may have way more than this.
# So shortening is required (1500 chars is a good bet):
# if len(answer) > 1500:
# answer = answer[0:1500] + "..."
# return the formulated answer
return answer, key
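# Illustrative example (hypothetical call; the wiki path needs network access
# for the Wikipedia lookup): getReply("wiki python") returns a tuple like
# ("Python is a programming language ...", "wiki"), while a message with no
# known keyword returns the help prompt and the key "None".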
if __name__ == '__main__':
app.run()
| {
"content_hash": "b5ee807b4cd3ee33a7c25d87020000b3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 189,
"avg_line_length": 36.75257731958763,
"alnum_prop": 0.6350631136044881,
"repo_name": "csbenk/flash-experiment",
"id": "6048c30e61b2cc746476ffb22b002fd931646ead",
"size": "3565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "805"
},
{
"name": "Python",
"bytes": "3565"
}
],
"symlink_target": ""
} |
"""
Copyright 2016 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from lib import excel_action, excel_reader
class GetExcelSheetsAction(excel_action.ExcelAction):
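    # Returns the variable names found on the given sheet, falling back to the
    # pack's configured key column and variable-name row unless overridden.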
def run(self, sheet, excel_file=None, key_column=None,
variable_name_row=None):
self.replace_defaults(excel_file, key_column, variable_name_row)
excel = excel_reader.ExcelReader(self._excel_file)
excel.set_sheet(sheet, key_column=self._key_column,
var_name_row=self._var_name_row,
strict=True)
return excel.get_variable_names()
| {
"content_hash": "5bdbde47d064450df6e396914f23973b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 39.607142857142854,
"alnum_prop": 0.7168620378719567,
"repo_name": "StackStorm/st2contrib",
"id": "8c9c077be1b0ecc0e78b28b509f2485a976d0b80",
"size": "1109",
"binary": false,
"copies": "2",
"ref": "refs/heads/st2contrib-deprecated-archive",
"path": "archive/packs/excel/actions/get_keys_for_columns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5581"
},
{
"name": "Python",
"bytes": "1362240"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7781"
}
],
"symlink_target": ""
} |
"""
SDoc
Copyright 2016 Set Based IT Consultancy
Licence MIT
"""
# ----------------------------------------------------------------------------------------------------------------------
from sdoc.sdoc2.NodeStore import NodeStore
from sdoc.sdoc2.node.Node import Node
class CaptionNode(Node):
"""
SDoc2 node for captions.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, io, options, argument):
"""
Object constructor.
:param None|cleo.styles.output_style.OutputStyle io: The IO object.
:param dict[str,str] options: The options of this caption.
:param str argument: The title of this caption.
"""
super().__init__(io, 'caption', options, argument)
# ------------------------------------------------------------------------------------------------------------------
def get_command(self):
"""
Returns the command of this node, i.e. caption.
:rtype: str
"""
return 'caption'
# ------------------------------------------------------------------------------------------------------------------
def is_block_command(self):
"""
Returns False.
:rtype: bool
"""
return False
# ------------------------------------------------------------------------------------------------------------------
def is_inline_command(self):
"""
Returns True.
:rtype: bool
"""
return True
# ----------------------------------------------------------------------------------------------------------------------
NodeStore.register_inline_command('caption', CaptionNode)
| {
"content_hash": "a0d0a8f5c419c211e918a540c31fa410",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 120,
"avg_line_length": 30.92982456140351,
"alnum_prop": 0.33862733976176973,
"repo_name": "OlegKlimenko/py-sdoc",
"id": "d2ad74adafe295adcbaa04e8f473b6f5296152f6",
"size": "1763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdoc/sdoc2/node/CaptionNode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "7975"
},
{
"name": "Python",
"bytes": "405351"
}
],
"symlink_target": ""
} |
import pytest
import pandas as pd
import random
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.data.repo_dataset import get_public_dataset
class TestRepoDataset(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def test_init_dataset(self):
name = random.sample([x for x in range(10)], 5)
path = '~/.chronos/dataset'
with pytest.raises(AssertionError):
get_public_dataset(name, path=path, redownload=False)
name = 'nyc_taxi'
path = random.sample([x for x in range(10)], 5)
with pytest.raises(AssertionError):
get_public_dataset(name, path=path, redownload=False)
name = 'chronos_dataset'
        path = '~/.chronos/dataset/'
with pytest.raises(NameError):
get_public_dataset(name, path=path, redownload=False)
| {
"content_hash": "b3a4e4d3de2b241b9015a779a9e13ccc",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 65,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.649890590809628,
"repo_name": "intel-analytics/analytics-zoo",
"id": "f07a0d14a8d82ed8b5110660f5e257b576a5e147",
"size": "1504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/test/zoo/chronos/data/test_repo_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
} |
from django.forms import widgets
from django.forms.fields import BooleanField, CharField, ChoiceField, MultipleChoiceField
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from entangled.forms import EntangledModelFormMixin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.icon.plugin_base import IconPluginMixin
from cmsplugin_cascade.icon.forms import IconFormMixin
from cmsplugin_cascade.link.config import LinkPluginBase, LinkFormMixin
from cmsplugin_cascade.link.plugin_base import LinkElementMixin
class ButtonTypeWidget(widgets.RadioSelect):
"""
Render sample buttons in different colors in the button's backend editor.
"""
template_name = 'cascade/admin/widgets/button_types.html'
class ButtonSizeWidget(widgets.RadioSelect):
"""
Render sample buttons in different sizes in the button's backend editor.
"""
template_name = 'cascade/admin/widgets/button_sizes.html'
class ButtonFormMixin(EntangledModelFormMixin):
BUTTON_TYPES = [
('btn-primary', _("Primary")),
('btn-secondary', _("Secondary")),
('btn-success', _("Success")),
('btn-danger', _("Danger")),
('btn-warning', _("Warning")),
('btn-info', _("Info")),
('btn-light', _("Light")),
('btn-dark', _("Dark")),
('btn-link', _("Link")),
('btn-outline-primary', _("Primary")),
('btn-outline-secondary', _("Secondary")),
('btn-outline-success', _("Success")),
('btn-outline-danger', _("Danger")),
('btn-outline-warning', _("Warning")),
('btn-outline-info', _("Info")),
('btn-outline-light', _("Light")),
('btn-outline-dark', _("Dark")),
('btn-outline-link', _("Link")),
]
BUTTON_SIZES = [
('btn-lg', _("Large button")),
('', _("Default button")),
('btn-sm', _("Small button")),
]
link_content = CharField(
required=False,
label=_("Button Content"),
widget=widgets.TextInput(attrs={'size': 50}),
)
button_type = ChoiceField(
label=_("Button Type"),
widget=ButtonTypeWidget(choices=BUTTON_TYPES),
choices=BUTTON_TYPES,
initial='btn-primary',
help_text=_("Display Link using this Button Style")
)
button_size = ChoiceField(
label=_("Button Size"),
widget=ButtonSizeWidget(choices=BUTTON_SIZES),
choices=BUTTON_SIZES,
initial='',
required=False,
help_text=_("Display Link using this Button Size")
)
button_options = MultipleChoiceField(
label=_("Button Options"),
choices=[
('btn-block', _('Block level')),
('disabled', _('Disabled')),
],
required=False,
widget=widgets.CheckboxSelectMultiple,
)
stretched_link = BooleanField(
label=_("Stretched link"),
required=False,
        help_text=_("Stretched-link utility to make any anchor the size of its nearest position: " \
"relative parent, perfect for entirely clickable cards!")
)
icon_align = ChoiceField(
label=_("Icon alignment"),
choices=[
('icon-left', _("Icon placed left")),
('icon-right', _("Icon placed right")),
],
widget=widgets.RadioSelect,
initial='icon-right',
help_text=_("Add an Icon before or after the button content."),
)
class Meta:
entangled_fields = {'glossary': ['link_content', 'button_type', 'button_size', 'button_options', 'icon_align',
'stretched_link']}
class BootstrapButtonMixin(IconPluginMixin):
require_parent = True
parent_classes = ['BootstrapColumnPlugin', 'SimpleWrapperPlugin']
render_template = 'cascade/bootstrap4/button.html'
allow_children = False
default_css_class = 'btn'
default_css_attributes = ['button_type', 'button_size', 'button_options', 'stretched_link']
ring_plugin = 'ButtonMixin'
class Media:
css = {'all': ['cascade/css/admin/bootstrap4-buttons.css', 'cascade/css/admin/iconplugin.css']}
js = ['admin/js/jquery.init.js', 'cascade/js/admin/buttonmixin.js']
def render(self, context, instance, placeholder):
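        # If an icon is configured, expose the <i> element to the template on
        # the side of the button content selected by the 'icon_align' option.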
context = super().render(context, instance, placeholder)
if 'icon_font_class' in context:
mini_template = '{0}<i class="{1} {2}" aria-hidden="true"></i>{3}'
icon_align = instance.glossary.get('icon_align')
if icon_align == 'icon-left':
context['icon_left'] = format_html(mini_template, '', context['icon_font_class'], 'cascade-icon-left',
' ')
elif icon_align == 'icon-right':
context['icon_right'] = format_html(mini_template, ' ', context['icon_font_class'],
'cascade-icon-right', '')
return context
class BootstrapButtonFormMixin(LinkFormMixin, IconFormMixin, ButtonFormMixin):
require_link = False
require_icon = False
class BootstrapButtonPlugin(BootstrapButtonMixin, LinkPluginBase):
module = 'Bootstrap'
name = _("Button")
model_mixins = (LinkElementMixin,)
form = BootstrapButtonFormMixin
ring_plugin = 'ButtonPlugin'
DEFAULT_BUTTON_ATTRIBUTES = {'role': 'button'}
class Media:
js = ['admin/js/jquery.init.js', 'cascade/js/admin/buttonplugin.js']
@classmethod
def get_identifier(cls, instance):
content = instance.glossary.get('link_content')
if not content:
try:
button_types = dict(ButtonFormMixin.BUTTON_TYPES)
content = str(button_types[instance.glossary['button_type']])
except KeyError:
content = _("Empty")
return content
@classmethod
def get_css_classes(cls, obj):
css_classes = cls.super(BootstrapButtonPlugin, cls).get_css_classes(obj)
if obj.glossary.get('stretched_link'):
            css_classes.append('stretched-link')
return css_classes
@classmethod
def get_html_tag_attributes(cls, obj):
attributes = cls.super(BootstrapButtonPlugin, cls).get_html_tag_attributes(obj)
attributes.update(cls.DEFAULT_BUTTON_ATTRIBUTES)
return attributes
def render(self, context, instance, placeholder):
context = self.super(BootstrapButtonPlugin, self).render(context, instance, placeholder)
return context
plugin_pool.register_plugin(BootstrapButtonPlugin)
| {
"content_hash": "2239a0acc830cffa22e83369d7545b15",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 118,
"avg_line_length": 36.34615384615385,
"alnum_prop": 0.6111866969009826,
"repo_name": "jrief/djangocms-cascade",
"id": "0e8e1e4c19e8b9159eca56e8a5607aec6f86da53",
"size": "6617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_cascade/bootstrap4/buttons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20152"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "JavaScript",
"bytes": "106622"
},
{
"name": "Python",
"bytes": "424314"
}
],
"symlink_target": ""
} |
"""Response parsers for the various protocol types.
The module contains classes that can take an HTTP response, and given
an output shape, parse the response into a dict according to the
rules in the output shape.
There are many similarities amongst the different protocols with regard
to response parsing, and the code is structured in a way to avoid
code duplication when possible. The diagram below shows
the inheritance hierarchy of the response classes.
::
+--------------+
|ResponseParser|
+--------------+
^ ^ ^
+--------------------+ | +-------------------+
| | |
+----------+----------+ +------+-------+ +-------+------+
|BaseXMLResponseParser| |BaseRestParser| |BaseJSONParser|
+---------------------+ +--------------+ +--------------+
^ ^ ^ ^ ^ ^
| | | | | |
| | | | | |
| ++----------+-+ +-+-----------++ |
| |RestXMLParser| |RestJSONParser| |
+-----+-----+ +-------------+ +--------------+ +----+-----+
|QueryParser| |JSONParser|
+-----------+ +----------+
The diagram above shows that there is a base class, ``ResponseParser`` that
contains logic that is similar amongst all the different protocols (``query``,
``json``, ``rest-json``, ``rest-xml``). Amongst the various services there
is shared logic that can be grouped several ways:
* The ``query`` and ``rest-xml`` both have XML bodies that are parsed in the
same way.
* The ``json`` and ``rest-json`` protocols both have JSON bodies that are
parsed in the same way.
* The ``rest-json`` and ``rest-xml`` protocols have additional attributes
besides body parameters that are parsed the same (headers, query string,
status code).
This is reflected in the class diagram above. The ``BaseXMLResponseParser``
and the BaseJSONParser contain logic for parsing the XML/JSON body,
and the BaseRestParser contains logic for parsing out attributes that
come from other parts of the HTTP response. Classes like the
``RestXMLParser`` inherit from the ``BaseXMLResponseParser`` to get the
XML body parsing logic and the ``BaseRestParser`` to get the HTTP
header/status code/query string parsing.
Return Values
=============
Each call to ``parse()`` returns a dict that has this form::
Standard Response
{
"ResponseMetadata": {"RequestId": <requestid>}
<response keys>
}
Error response
{
"ResponseMetadata": {"RequestId": <requestid>}
"Error": {
"Code": <string>,
"Message": <string>,
"Type": <string>,
<additional keys>
}
}
"""
import re
import base64
import json
import xml.etree.cElementTree
import logging
from botocore.compat import six, XMLParseError
from botocore.utils import parse_timestamp, merge_dicts
LOG = logging.getLogger(__name__)
DEFAULT_TIMESTAMP_PARSER = parse_timestamp
class ResponseParserFactory(object):
def __init__(self):
self._defaults = {}
def set_parser_defaults(self, **kwargs):
"""Set default arguments when a parser instance is created.
You can specify any kwargs that are allowed by a ResponseParser
class. There are currently two arguments:
        * timestamp_parser - A callable that can parse a timestamp string
* blob_parser - A callable that can parse a blob type
"""
self._defaults.update(kwargs)
def create_parser(self, protocol_name):
parser_cls = PROTOCOL_PARSERS[protocol_name]
return parser_cls(**self._defaults)
def create_parser(protocol):
return ResponseParserFactory().create_parser(protocol)
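# Example usage (an illustrative sketch, not part of this module's own docs):
#
#     parser = create_parser('json')
#     response = {
#         'status_code': 200,
#         'headers': {'x-amzn-requestid': 'request-id'},
#         'body': b'{}',
#     }
#     # ``shape`` normally comes from the service model; passing None here
#     # simply returns the ResponseMetadata portion of the response.
#     parsed = parser.parse(response, shape=None)
#     # parsed == {'ResponseMetadata': {'RequestId': 'request-id',
#     #                                 'HTTPStatusCode': 200}}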
def _text_content(func):
# This decorator hides the difference between
# an XML node with text or a plain string. It's used
# to ensure that scalar processing operates only on text
# strings, which allows the same scalar handlers to be used
# for XML nodes from the body and HTTP headers.
def _get_text_content(self, shape, node_or_string):
if hasattr(node_or_string, 'text'):
text = node_or_string.text
if text is None:
# If an XML node is empty <foo></foo>,
# we want to parse that as an empty string,
# not as a null/None value.
text = ''
else:
text = node_or_string
return func(self, shape, text)
return _get_text_content
class ResponseParserError(Exception):
pass
class ResponseParser(object):
"""Base class for response parsing.
This class represents the interface that all ResponseParsers for the
various protocols must implement.
This class will take an HTTP response and a model shape and parse the
HTTP response into a dictionary.
There is a single public method exposed: ``parse``. See the ``parse``
docstring for more info.
"""
DEFAULT_ENCODING = 'utf-8'
def __init__(self, timestamp_parser=None, blob_parser=None):
if timestamp_parser is None:
timestamp_parser = DEFAULT_TIMESTAMP_PARSER
self._timestamp_parser = timestamp_parser
if blob_parser is None:
blob_parser = self._default_blob_parser
self._blob_parser = blob_parser
def _default_blob_parser(self, value):
# Blobs are always returned as bytes type (this matters on python3).
# We don't decode this to a str because it's entirely possible that the
# blob contains binary data that actually can't be decoded.
return base64.b64decode(value)
def parse(self, response, shape):
"""Parse the HTTP response given a shape.
:param response: The HTTP response dictionary. This is a dictionary
that represents the HTTP request. The dictionary must have the
following keys, ``body``, ``headers``, and ``status_code``.
:param shape: The model shape describing the expected output.
:return: Returns a dictionary representing the parsed response
described by the model. In addition to the shape described from
the model, each response will also have a ``ResponseMetadata``
            which contains metadata about the response, including at least
            the keys ``RequestId`` and ``HTTPStatusCode``.  Some
responses may populate additional keys, but ``RequestId`` will
always be present.
"""
LOG.debug('Response headers: %s', response['headers'])
LOG.debug('Response body:\n%s', response['body'])
if response['status_code'] >= 301:
parsed = self._do_error_parse(response, shape)
else:
parsed = self._do_parse(response, shape)
# Inject HTTPStatusCode key in the response metadata if the
# response metadata exists.
if isinstance(parsed, dict) and 'ResponseMetadata' in parsed:
parsed['ResponseMetadata']['HTTPStatusCode'] = (
response['status_code'])
return parsed
def _do_parse(self, response, shape):
raise NotImplementedError("%s._do_parse" % self.__class__.__name__)
def _do_error_parse(self, response, shape):
raise NotImplementedError(
"%s._do_error_parse" % self.__class__.__name__)
def _parse_shape(self, shape, node):
handler = getattr(self, '_handle_%s' % shape.type_name,
self._default_handle)
return handler(shape, node)
def _handle_list(self, shape, node):
# Enough implementations share list serialization that it's moved
# up here in the base class.
parsed = []
member_shape = shape.member
for item in node:
parsed.append(self._parse_shape(member_shape, item))
return parsed
def _default_handle(self, shape, value):
return value
class BaseXMLResponseParser(ResponseParser):
def __init__(self, timestamp_parser=None, blob_parser=None):
super(BaseXMLResponseParser, self).__init__(timestamp_parser,
blob_parser)
self._namespace_re = re.compile('{.*}')
def _handle_map(self, shape, node):
parsed = {}
key_shape = shape.key
value_shape = shape.value
key_location_name = key_shape.serialization.get('name') or 'key'
value_location_name = value_shape.serialization.get('name') or 'value'
if shape.serialization.get('flattened') and not isinstance(node, list):
node = [node]
for keyval_node in node:
for single_pair in keyval_node:
# Within each <entry> there's a <key> and a <value>
tag_name = self._node_tag(single_pair)
if tag_name == key_location_name:
key_name = self._parse_shape(key_shape, single_pair)
elif tag_name == value_location_name:
val_name = self._parse_shape(value_shape, single_pair)
else:
raise ResponseParserError("Unknown tag: %s" % tag_name)
parsed[key_name] = val_name
return parsed
def _node_tag(self, node):
return self._namespace_re.sub('', node.tag)
def _handle_list(self, shape, node):
# When we use _build_name_to_xml_node, repeated elements are aggregated
# into a list. However, we can't tell the difference between a scalar
# value and a single element flattened list. So before calling the
# real _handle_list, we know that "node" should actually be a list if
# it's flattened, and if it's not, then we make it a one element list.
if shape.serialization.get('flattened') and not isinstance(node, list):
node = [node]
return super(BaseXMLResponseParser, self)._handle_list(shape, node)
def _handle_structure(self, shape, node):
parsed = {}
members = shape.members
xml_dict = self._build_name_to_xml_node(node)
for member_name in members:
member_shape = members[member_name]
if 'location' in member_shape.serialization:
# All members with locations have already been handled,
# so we don't need to parse these members.
continue
xml_name = self._member_key_name(member_shape, member_name)
member_node = xml_dict.get(xml_name)
if member_node is not None:
parsed[member_name] = self._parse_shape(
member_shape, member_node)
elif member_shape.serialization.get('xmlAttribute'):
attribs = {}
location_name = member_shape.serialization['name']
for key, value in node.attrib.items():
new_key = self._namespace_re.sub(
location_name.split(':')[0] + ':', key)
attribs[new_key] = value
if location_name in attribs:
parsed[member_name] = attribs[location_name]
return parsed
def _member_key_name(self, shape, member_name):
# This method is needed because we have to special case flattened list
# with a serialization name. If this is the case we use the
# locationName from the list's member shape as the key name for the
# surrounding structure.
if shape.type_name == 'list' and shape.serialization.get('flattened'):
list_member_serialized_name = shape.member.serialization.get(
'name')
if list_member_serialized_name is not None:
return list_member_serialized_name
serialized_name = shape.serialization.get('name')
if serialized_name is not None:
return serialized_name
return member_name
def _build_name_to_xml_node(self, parent_node):
        # If the parent node is actually a list, we should not be trying
# to serialize it to a dictionary. Instead, return the first element
# in the list.
if isinstance(parent_node, list):
return self._build_name_to_xml_node(parent_node[0])
xml_dict = {}
for item in parent_node:
key = self._node_tag(item)
if key in xml_dict:
# If the key already exists, the most natural
# way to handle this is to aggregate repeated
# keys into a single list.
# <foo>1</foo><foo>2</foo> -> {'foo': [Node(1), Node(2)]}
if isinstance(xml_dict[key], list):
xml_dict[key].append(item)
else:
# Convert from a scalar to a list.
xml_dict[key] = [xml_dict[key], item]
else:
xml_dict[key] = item
return xml_dict
def _parse_xml_string_to_dom(self, xml_string):
try:
parser = xml.etree.cElementTree.XMLParser(
target=xml.etree.cElementTree.TreeBuilder(),
encoding=self.DEFAULT_ENCODING)
parser.feed(xml_string)
root = parser.close()
except XMLParseError as e:
raise ResponseParserError(
"Unable to parse response (%s), "
"invalid XML received:\n%s" % (e, xml_string))
return root
def _replace_nodes(self, parsed):
for key, value in parsed.items():
if value.getchildren():
sub_dict = self._build_name_to_xml_node(value)
parsed[key] = self._replace_nodes(sub_dict)
else:
parsed[key] = value.text
return parsed
@_text_content
def _handle_boolean(self, shape, text):
if text == 'true':
return True
else:
return False
@_text_content
def _handle_float(self, shape, text):
return float(text)
@_text_content
def _handle_timestamp(self, shape, text):
return self._timestamp_parser(text)
@_text_content
def _handle_integer(self, shape, text):
return int(text)
@_text_content
def _handle_string(self, shape, text):
return text
@_text_content
def _handle_blob(self, shape, text):
return self._blob_parser(text)
_handle_character = _handle_string
_handle_double = _handle_float
_handle_long = _handle_integer
class QueryParser(BaseXMLResponseParser):
def _do_error_parse(self, response, shape):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
# Once we've converted xml->dict, we need to make one or two
# more adjustments to extract nested errors and to be consistent
# with ResponseMetadata for non-error responses:
# 1. {"Errors": {"Error": {...}}} -> {"Error": {...}}
# 2. {"RequestId": "id"} -> {"ResponseMetadata": {"RequestId": "id"}}
if 'Errors' in parsed:
parsed.update(parsed.pop('Errors'))
if 'RequestId' in parsed:
parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
return parsed
def _do_parse(self, response, shape):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = {}
if shape is not None:
start = root
if 'resultWrapper' in shape.serialization:
start = self._find_result_wrapped_shape(
shape.serialization['resultWrapper'],
root)
parsed = self._parse_shape(shape, start)
self._inject_response_metadata(root, parsed)
return parsed
def _find_result_wrapped_shape(self, element_name, xml_root_node):
mapping = self._build_name_to_xml_node(xml_root_node)
return mapping[element_name]
def _inject_response_metadata(self, node, inject_into):
mapping = self._build_name_to_xml_node(node)
child_node = mapping.get('ResponseMetadata')
if child_node is not None:
sub_mapping = self._build_name_to_xml_node(child_node)
for key, value in sub_mapping.items():
sub_mapping[key] = value.text
inject_into['ResponseMetadata'] = sub_mapping
class EC2QueryParser(QueryParser):
def _inject_response_metadata(self, node, inject_into):
mapping = self._build_name_to_xml_node(node)
child_node = mapping.get('requestId')
if child_node is not None:
inject_into['ResponseMetadata'] = {'RequestId': child_node.text}
def _do_error_parse(self, response, shape):
# EC2 errors look like:
# <Response>
# <Errors>
# <Error>
# <Code>InvalidInstanceID.Malformed</Code>
# <Message>Invalid id: "1343124"</Message>
# </Error>
# </Errors>
# <RequestID>12345</RequestID>
# </Response>
# This is different from QueryParser in that it's RequestID,
# not RequestId
original = super(EC2QueryParser, self)._do_error_parse(response, shape)
original['ResponseMetadata'] = {
'RequestId': original.pop('RequestID')
}
return original
class BaseJSONParser(ResponseParser):
def _handle_structure(self, shape, value):
member_shapes = shape.members
if value is None:
            # If the value comes across the wire as "null" (None in python),
# we should be returning this unchanged, instead of as an
# empty dict.
return None
final_parsed = {}
for member_name in member_shapes:
member_shape = member_shapes[member_name]
json_name = member_shape.serialization.get('name', member_name)
raw_value = value.get(json_name)
if raw_value is not None:
final_parsed[member_name] = self._parse_shape(
member_shapes[member_name],
raw_value)
return final_parsed
def _handle_map(self, shape, value):
parsed = {}
key_shape = shape.key
value_shape = shape.value
for key, value in value.items():
actual_key = self._parse_shape(key_shape, key)
actual_value = self._parse_shape(value_shape, value)
parsed[actual_key] = actual_value
return parsed
def _handle_blob(self, shape, value):
return self._blob_parser(value)
def _handle_timestamp(self, shape, value):
return self._timestamp_parser(value)
def _do_error_parse(self, response, shape):
body = self._parse_body_as_json(response['body'])
error = {"Error": {"Message": '', "Code": ''}, "ResponseMetadata": {}}
# Error responses can have slightly different structures for json.
# The basic structure is:
#
# {"__type":"ConnectClientException",
# "message":"The error message."}
# The error message can either come in the 'message' or 'Message' key
# so we need to check for both.
error['Error']['Message'] = body.get('message',
body.get('Message', ''))
code = body.get('__type')
if code is not None:
# code has a couple forms as well:
# * "com.aws.dynamodb.vAPI#ProvisionedThroughputExceededException"
# * "ResourceNotFoundException"
if '#' in code:
code = code.rsplit('#', 1)[1]
error['Error']['Code'] = code
self._inject_response_metadata(error, response['headers'])
return error
def _inject_response_metadata(self, parsed, headers):
if 'x-amzn-requestid' in headers:
parsed.setdefault('ResponseMetadata', {})['RequestId'] = (
headers['x-amzn-requestid'])
def _parse_body_as_json(self, body_contents):
if not body_contents:
return {}
body = body_contents.decode(self.DEFAULT_ENCODING)
original_parsed = json.loads(body)
return original_parsed
class JSONParser(BaseJSONParser):
    """Response parser for the "json" protocol."""
def _do_parse(self, response, shape):
# The json.loads() gives us the primitive JSON types,
# but we need to traverse the parsed JSON data to convert
        # to richer types (blobs, timestamps, etc.).
parsed = {}
if shape is not None:
original_parsed = self._parse_body_as_json(response['body'])
parsed = self._parse_shape(shape, original_parsed)
self._inject_response_metadata(parsed, response['headers'])
return parsed
class BaseRestParser(ResponseParser):
def _do_parse(self, response, shape):
final_parsed = {}
final_parsed['ResponseMetadata'] = self._populate_response_metadata(
response)
if shape is None:
return final_parsed
member_shapes = shape.members
self._parse_non_payload_attrs(response, shape,
member_shapes, final_parsed)
self._parse_payload(response, shape, member_shapes, final_parsed)
return final_parsed
def _populate_response_metadata(self, response):
metadata = {}
headers = response['headers']
if 'x-amzn-requestid' in headers:
metadata['RequestId'] = headers['x-amzn-requestid']
elif 'x-amz-request-id' in headers:
metadata['RequestId'] = headers['x-amz-request-id']
        # HostId is what it's called whenever this value is returned
        # in an XML response body, so to be consistent, we'll always
        # call it HostId.
metadata['HostId'] = headers.get('x-amz-id-2', '')
return metadata
def _parse_payload(self, response, shape, member_shapes, final_parsed):
if 'payload' in shape.serialization:
# If a payload is specified in the output shape, then only that
# shape is used for the body payload.
payload_member_name = shape.serialization['payload']
body_shape = member_shapes[payload_member_name]
if body_shape.type_name in ['string', 'blob']:
# This is a stream
body = response['body']
if isinstance(body, bytes):
body = body.decode(self.DEFAULT_ENCODING)
final_parsed[payload_member_name] = body
else:
original_parsed = self._initial_body_parse(response['body'])
final_parsed[payload_member_name] = self._parse_shape(
body_shape, original_parsed)
else:
original_parsed = self._initial_body_parse(response['body'])
body_parsed = self._parse_shape(shape, original_parsed)
final_parsed.update(body_parsed)
def _parse_non_payload_attrs(self, response, shape,
member_shapes, final_parsed):
headers = response['headers']
for name in member_shapes:
member_shape = member_shapes[name]
location = member_shape.serialization.get('location')
if location is None:
continue
elif location == 'statusCode':
final_parsed[name] = self._parse_shape(
member_shape, response['status_code'])
elif location == 'headers':
final_parsed[name] = self._parse_header_map(member_shape,
headers)
elif location == 'header':
header_name = member_shape.serialization.get('name', name)
if header_name in headers:
final_parsed[name] = self._parse_shape(
member_shape, headers[header_name])
def _parse_header_map(self, shape, headers):
# Note that headers are case insensitive, so we .lower()
# all header names and header prefixes.
parsed = {}
prefix = shape.serialization.get('name', '').lower()
for header_name in headers:
if header_name.lower().startswith(prefix):
# The key name inserted into the parsed hash
# strips off the prefix.
name = header_name[len(prefix):]
parsed[name] = headers[header_name]
return parsed
def _initial_body_parse(self, body_contents):
# This method should do the initial xml/json parsing of the
        # body. We still need to walk the parsed body in order
# to convert types, but this method will do the first round
# of parsing.
raise NotImplementedError("_initial_body_parse")
class RestJSONParser(BaseRestParser, BaseJSONParser):
def _initial_body_parse(self, body_contents):
return self._parse_body_as_json(body_contents)
def _do_error_parse(self, response, shape):
error = super(RestJSONParser, self)._do_error_parse(response, shape)
self._inject_error_code(error, response)
return error
def _inject_error_code(self, error, response):
# The "Code" value can come from either a response
# header or a value in the JSON body.
body = self._initial_body_parse(response['body'])
if 'x-amzn-errortype' in response['headers']:
code = response['headers']['x-amzn-errortype']
# Could be:
# x-amzn-errortype: ValidationException:
code = code.split(':')[0]
error['Error']['Code'] = code
elif 'code' in body or 'Code' in body:
error['Error']['Code'] = body.get(
'code', body.get('Code', ''))
class RestXMLParser(BaseRestParser, BaseXMLResponseParser):
def _initial_body_parse(self, xml_string):
if not xml_string:
return xml.etree.cElementTree.Element('')
return self._parse_xml_string_to_dom(xml_string)
def _do_error_parse(self, response, shape):
# We're trying to be service agnostic here, but S3 does have a slightly
# different response structure for its errors compared to other
        # rest-xml services (route53/cloudfront). We handle this by just
# trying to parse both forms.
# First:
# <ErrorResponse xmlns="...">
# <Error>
# <Type>Sender</Type>
# <Code>InvalidInput</Code>
# <Message>Invalid resource type: foo</Message>
# </Error>
# <RequestId>request-id</RequestId>
# </ErrorResponse>
if response['body']:
# If the body ends up being invalid xml, the xml parser should not
# blow up. It should at least try to pull information about the
# the error response from other sources like the HTTP status code.
try:
return self._parse_error_from_body(response)
except ResponseParserError as e:
LOG.debug(
'Exception caught when parsing error response body:',
exc_info=True)
return self._parse_error_from_http_status(response)
def _parse_error_from_http_status(self, response):
return {
'Error': {
'Code': str(response['status_code']),
'Message': six.moves.http_client.responses.get(
response['status_code'], ''),
},
'ResponseMetadata': {
'RequestId': response['headers'].get('x-amz-request-id', ''),
'HostId': response['headers'].get('x-amz-id-2', ''),
}
}
def _parse_error_from_body(self, response):
xml_contents = response['body']
root = self._parse_xml_string_to_dom(xml_contents)
parsed = self._build_name_to_xml_node(root)
self._replace_nodes(parsed)
if root.tag == 'Error':
# This is an S3 error response. First we'll populate the
# response metadata.
metadata = self._populate_response_metadata(response)
# The RequestId and the HostId are already in the
# ResponseMetadata, but are also duplicated in the XML
# body. We don't need these values in both places,
# we'll just remove them from the parsed XML body.
parsed.pop('RequestId', '')
parsed.pop('HostId', '')
return {'Error': parsed, 'ResponseMetadata': metadata}
elif 'RequestId' in parsed:
            # Other rest-xml services:
parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
default = {'Error': {'Message': '', 'Code': ''}}
merge_dicts(default, parsed)
return default
PROTOCOL_PARSERS = {
'ec2': EC2QueryParser,
'query': QueryParser,
'json': JSONParser,
'rest-json': RestJSONParser,
'rest-xml': RestXMLParser,
}
| {
"content_hash": "6516dd9cfe972a2ef667dd7d38a5de72",
"timestamp": "",
"source": "github",
"line_count": 738,
"max_line_length": 79,
"avg_line_length": 40.082655826558266,
"alnum_prop": 0.5764511003684798,
"repo_name": "rest-of/the-deck",
"id": "9b9e1f19700573bd45874f38f02b0dc01ee36c33",
"size": "30142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambda/lib/python2.7/site-packages/botocore/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "API Blueprint",
"bytes": "3461"
},
{
"name": "CSS",
"bytes": "52779"
},
{
"name": "JavaScript",
"bytes": "15800"
},
{
"name": "Python",
"bytes": "4546415"
},
{
"name": "Shell",
"bytes": "3929"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
import pygame
import random
import sortalgorithm
import threading
class SortDemo:
Margin = 20
TitleHeight = 40
Colors = {
'normal': (pygame.Color(0xAA,0xAA,0xAA), pygame.Color(0x88,0x88,0x88)),
'write': (pygame.Color(0xFF,0x88,0x88), pygame.Color(0xAA,0x44,0x44)),
'cmp': (pygame.Color(0x88,0x88,0xFF), pygame.Color(0x44,0x44,0xFF)),
'active': (pygame.Color(0x99,0x99,0x99), pygame.Color(0x88,0x88,0x88)),
'success':(pygame.Color(0xAA,0xCC,0xAA), pygame.Color(0x88,0xBB,0x88)),
'fail': (pygame.Color(0xFF,0x44,0x44), pygame.Color(0xFF,0x88,0x88)),
}
    # Draw an array with its item statuses
def draw_array(self, runner, index=0):
        # Compute the left offset and dimensions
array = runner.array
left = self.Margin + index * (self.Margin + self.array_width)
top = self.TitleHeight + self.Margin
max_value = max(array)
height = self.array_height / len(array)
width = self.array_width / max_value
        # Draw the title
font = pygame.font.Font(None, 36)
text = font.render(runner.name, 1, (0x00, 0x00, 0x00))
textpos = text.get_rect()
textpos.centerx = left + self.array_width / 2
textpos.top = 10
self.screen.blit(text, textpos)
        # Draw the status counters
font = pygame.font.Font(None, 16)
status = "Comparaciones = %5d | Asignaciones = %5d" % (runner.comparisons, runner.assignments)
text = font.render(status, 1, (0x00, 0x00, 0x00))
textpos = text.get_rect()
textpos.centerx = left + self.array_width / 2
textpos.top = 40
self.screen.blit(text, textpos)
        # Build the screen rectangle coordinates, without drawing them yet
screen_array = [ pygame.Rect(left, top + index*height, value * width, height) for index, value in enumerate(array) ]
        # Draw the border
pygame.draw.rect(self.screen, pygame.Color(0xCC,0xCC,0xCC), [left, top, self.array_width, self.array_height], 1)
        # Draw the array itself
for rect, status in zip(screen_array, runner.statuses):
self.draw_item(rect, status)
    # Draw an item with the given color set
def draw_item(self, rect, color_set='normal'):
fill, border = self.Colors[color_set]
pygame.draw.rect(self.screen, fill, rect)
pygame.draw.rect(self.screen, border, rect, 1)
def __init__(self, runs, **kwargs):
self.fps = kwargs.get('fps') or 20
self.array_width = kwargs.get('ancho') or 280
self.array_height = kwargs.get('alto') or 500
self.event = threading.Event()
self.runners = [sortalgorithm.SortAlgorithm(algorithm, list(array), self.event, name) for (algorithm, name, array) in runs]
pygame.init()
        # Set up the screen
self.screen = pygame.display.set_mode([self.Margin + (self.array_width + self.Margin) * len(self.runners), self.TitleHeight + self.array_height + self.Margin * 2])
pygame.display.set_caption("Sorting Algorithms Demo")
clock = pygame.time.Clock()
self.screen.fill(pygame.Color("white"))
        # Draw the arrays
for index, runner in enumerate(self.runners):
self.draw_array(runner, index)
        # Start the algorithms
for runner in self.runners:
runner.start()
        # Main loop: run until the user chooses to quit
finished = False
while not finished:
            # Poll user input
for event in pygame.event.get():
                # If they chose to quit, stop once this loop iteration finishes
if event.type == pygame.QUIT:
finished = True
            # Give the algorithms time to run
for runner in self.runners:
runner.wait_for_execute_iteration()
            # Clear the screen
self.screen.fill(pygame.Color("white"))
            # Draw the arrays
for index, runner in enumerate(self.runners):
self.draw_array(runner, index)
            # Redraw
clock.tick(self.fps)
pygame.display.update()
            # Wake them up
self.event.set()
pygame.quit()
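# Minimal usage sketch (illustrative only): the sort callables and their names
# below are assumptions, not part of this module. Each run passed to SortDemo
# is an (algorithm, name, array) tuple.
#
#   if __name__ == '__main__':
#       data = [random.randint(1, 1000) for _ in range(60)]
#       SortDemo([(bubble_sort, "Bubble sort", data),
#                 (quick_sort, "Quick sort", data)],
#                fps=30, ancho=280, alto=500)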
| {
"content_hash": "fea8558c1cbf1cf990d36aaf66eadb77",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 171,
"avg_line_length": 39.285714285714285,
"alnum_prop": 0.573475935828877,
"repo_name": "spalladino/algoritmos-gcba",
"id": "729905ab2e3cabd104e03fd8db9a9398b8606d08",
"size": "4675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codigo/python-sorting/sortdemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22481"
},
{
"name": "Shell",
"bytes": "254"
},
{
"name": "TeX",
"bytes": "153920"
}
],
"symlink_target": ""
} |
import sys
import os
import datetime
import time
import subprocess
import signal
#Global settings
port = 2000
iphead = "10.10"
dagsamples = 10 * 1000 * 1000
dagtimeout = 120 #seconds
sm = 5
nwepoch = int(sys.argv[1]) #11 #((3*4000) + (240 * 256 * 8 * .1)) / 1000 #us
nwpsize = 230
pwfull = 65502
dodag = True
clientsperbox = 3
# per server settings
# (hostname, ip )
servers =[\
("quorum205" ,"1.2" ),\
#] ("quorum205" ,"0.2" ),\
# ("quorum205" ,"1.2" )\
]
#servers =[\
# ("backstroke" ,"0.10" ),\
# ("backstroke" ,"1.10" ),\
#]
# per host settings
# (hostname, ip, seqstart )
clients = [\
("quorum206" ,servers[0][1], dagsamples * sm * 0 ),\
("quorum207" ,servers[0][1], dagsamples * sm * 1 ),\
("quorum208" ,servers[0][1], dagsamples * sm * 4 ),\
("quorum301" ,servers[0][1], dagsamples * sm * 5 ),\
("hammerthrow" ,servers[0][1], dagsamples * sm * 6 ),\
("michael" ,servers[0][1], dagsamples * sm * 7 ),\
("backstroke" ,servers[0][1], dagsamples * sm * 8 ),\
("tigger-0" ,servers[0][1], dagsamples * sm * 9 ),\
("uriel" ,servers[0][1], dagsamples * sm * 10 ),\
("freestyle" ,servers[0][1], dagsamples * sm * 11 ),\
("quorum201" ,servers[0][1], dagsamples * sm * 5 )\
]
#Hold subprocess classes until we destroy everything
subs = []
msg1 = "[%s] Starting..." % (datetime.datetime.now())
print msg1
def run_remote_cmd(subs, readme, host, client_cmd):
sub_cmd = "ssh qjump@%s \"%s\"" %(host,client_cmd)
readme.write("%s\n" % (sub_cmd) )
readme.flush()
# print sub_cmd
p = subprocess.Popen(sub_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
subs.append((host,p))
# print "Done."
def run_perf(subs, readme, client_idx, priority, psize, pacing ):
(host, ip, seq_start) = clients[client_idx]
perf_cmd = "/home/qjump/qjump/qjump_fe2p_aps/fe2p_perf/bin/fe2p_perf_c --ip-port=udp:%s.%s:%s --size=%s --seq-start=%i --pacing=%i" % (iphead,ip,port,psize,seq_start,pacing)
client_cmd = "cd /home/qjump/qjump/set_sock_priority && sudo ./set_sock_priority -p %s -w 2500000 -c \\\"%s\\\"" % (priority,perf_cmd )
readme.write("%s\n%s\n" % (msg, client_cmd) )
readme.flush()
run_remote_cmd(subs, readme, host, client_cmd)
out_path = "/home/qjump/qjump_full_test/"
msg2 = "Making directory %s" % out_path
print msg2
if not os.path.exists(out_path):
os.makedirs(out_path)
#Setup the log
readme = open(out_path + "README", "w")
readme.write("%s\n%s\nStarted: %s\n" % (msg1,msg2,str(datetime.datetime.now())) )
readme.flush()
#Start the servers
for server in servers:
(host,ip) = server
msg = "[%s] Starting cluster test server on %s.%s.%s:%s ..." %(datetime.datetime.now(), host, iphead,ip, port)
print msg
sys.stdout.flush()
srv_cmd = "nc -u -l %s.%s %i > /dev/null" % (iphead,ip, port )
run_remote_cmd(subs,readme, host, srv_cmd)
#Start up the clients
for client_idx in range(0,len(clients)):
(host, ip, seq) = clients[client_idx]
#Set up the high priority client
msg = "[%s] Starting HIGH cluster test client [%i] on %s --> %s.%s:%s ..." % (datetime.datetime.now(), client_idx+1,host,iphead,ip,port)
print msg
sys.stdout.flush()
for i in range(0,clientsperbox):
run_perf(subs, readme, client_idx, 7, nwpsize, nwepoch)
    #Set up the low priority client - note: this setup should be factored into a function
msg = "[%s] Starting LOW cluster test client [%i] on %s --> %s.%s:%s ..." % (datetime.datetime.now(), client_idx+1,host,iphead,ip,port)
print msg
sys.stdout.flush()
#for i in range(0,clientsperbox):
run_perf(subs, readme, client_idx, 0, pwfull, 0 )
#Wait for everything to stabilize
msg = "[%s] Waiting ..." % (datetime.datetime.now())
print msg
sys.stdout.flush()
for i in range(0,5):
time.sleep(1)
print ".",
sys.stdout.flush()
print "Done."
sys.stdout.flush()
readme.write("%s\n" % (msg) )
readme.flush()
if dodag :
#Start up the DAG capture
msg = "[%s] Starting DAG capture on %s ..." %(datetime.datetime.now(), "quorum208")
print msg
sys.stdout.flush()
dag_cmd = "sudo dagconfig -s; sleep 2; sudo dagconfig -s; sleep 1; sudo ~/qjump/qjump-expr-tools/pin/pin 7 \\\"qjump/qjump-camio-tools/dag_captur2/bin/dag_capture -s %i -t 900\\\"" % (dagsamples)
sub_cmd = "ssh qjump@%s \"%s\"" %("quorum208",dag_cmd)
readme.write("%s\n%s\n" % (msg, sub_cmd) )
readme.flush()
dag_sub = subprocess.Popen(sub_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print "Done."
#Wait for the capture to complete
print "[%s] Waiting for DAG capture to finish..." %(datetime.datetime.now()),
sys.stdout.flush()
(stdout, stderr) = dag_sub.communicate()
readme.write(stdout + "\n")
readme.write(stderr + "\n")
readme.flush()
print "Done."
#Copy the DAG output
msg = "[%s] Copy the DAG output..." % (datetime.datetime.now())
sys.stdout.flush()
out_cmd = "scp qjump@%s:/tmp/dag_cap_* %s" % ("quorum208",out_path)
readme.write(out_cmd + "\n")
readme.flush()
out_sub = subprocess.Popen(out_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = out_sub.communicate()
readme.write(stdout + "\n")
readme.write( stderr + "\n")
readme.flush()
print "Done."
#kill all the clients
#for sub in subs:
for client in clients:
#Kill process on the remote box
# (host,sub) = sub
host = client[0]
print "[%s] Killing remote process on %s.." % (datetime.datetime.now(),host) ,
sys.stdout.flush()
kill_cmd = "sudo killall -9 fe2p_perf_c"
sub_cmd = "ssh qjump@%s \"%s\"" %(host,kill_cmd)
readme.write(sub_cmd + "\n")
readme.flush()
    #Collect the output
#readme += sub.stdout.read()
#readme += sub.stderr.read()
subprocess.call(sub_cmd, shell=True)
print "Done."
for server in servers:
#Kill process on the remote box
# (host,sub) = sub
host = server[0]
print "[%s] Killing remote process on %s.." % (datetime.datetime.now(),host) ,
sys.stdout.flush()
kill_cmd = "sudo killall -9 nc"
sub_cmd = "ssh qjump@%s \"%s\"" %(host,kill_cmd)
readme.write(sub_cmd + "\n")
readme.flush()
    #Collect the output
#readme += sub.stdout.read()
#readme += sub.stderr.read()
subprocess.call(sub_cmd, shell=True)
print "Done."
#All done
readme.write("Finished %s\n" % str(datetime.datetime.now()))
readme.flush()
readme.close()
print "[%s] Finished!\n\n" % (datetime.datetime.now())
| {
"content_hash": "62bd2c810c066dc27cbd88b7e859b0df",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 199,
"avg_line_length": 29.959821428571427,
"alnum_prop": 0.5990165400089406,
"repo_name": "camsas/qjump-expr-tools",
"id": "0aea05401c1d0d7fdc7c206f522175effb8b39db",
"size": "8301",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "factor-expr/run_factor_expr.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "182"
},
{
"name": "Python",
"bytes": "40937"
},
{
"name": "Shell",
"bytes": "13051"
}
],
"symlink_target": ""
} |
"""
IMPORTANT NOTE!
To use this module on Mac OS X, you need the PyObjC module installed.
For Python 3, run:
sudo pip3 install pyobjc-core
sudo pip3 install pyobjc
For Python 2, run:
sudo pip install pyobjc-core
sudo pip install pyobjc
(There's some bug with their installer, so install pyobjc-core first or else
the install takes forever.)
To use this module on Linux, you need Xlib module installed.
For Python 3, run:
sudo pip3 install python3-Xlib
For Python 2, run:
sudo pip install Xlib
To use this module on Windows, you do not need anything else.
You will need PIL/Pillow to use the screenshot features.
"""
from __future__ import absolute_import, division, print_function
__version__ = '0.9.35'
import collections
import sys
import time
try:
import pytweening
from pytweening import (easeInQuad, easeOutQuad, easeInOutQuad,
easeInCubic, easeOutCubic, easeInOutCubic, easeInQuart, easeOutQuart,
easeInOutQuart, easeInQuint, easeOutQuint, easeInOutQuint, easeInSine,
easeOutSine, easeInOutSine, easeInExpo, easeOutExpo, easeInOutExpo,
easeInCirc, easeOutCirc, easeInOutCirc, easeInElastic, easeOutElastic,
easeInOutElastic, easeInBack, easeOutBack, easeInOutBack, easeInBounce,
easeOutBounce, easeInOutBounce)
# getLine is not needed.
# getPointOnLine has been redefined in this file, to avoid dependency on pytweening.
# linear has also been redefined in this file.
except ImportError:
pass
try:
import pymsgbox
from pymsgbox import alert, confirm, prompt, password
except ImportError:
# If pymsgbox module is not found, those methods will not be available.
pass
try:
import pyscreeze
from pyscreeze import (center, grab, locate, locateAll, locateAllOnScreen,
locateCenterOnScreen, locateOnScreen, pixel, pixelMatchesColor,
screenshot)
except ImportError:
# If pyscreeze module is not found, screenshot-related features will simply
# not work.
pass
KEY_NAMES = ['\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(',
')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`',
'a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~',
'accept', 'add', 'alt', 'altleft', 'altright', 'apps', 'backspace',
'browserback', 'browserfavorites', 'browserforward', 'browserhome',
'browserrefresh', 'browsersearch', 'browserstop', 'capslock', 'clear',
'convert', 'ctrl', 'ctrlleft', 'ctrlright', 'decimal', 'del', 'delete',
'divide', 'down', 'end', 'enter', 'esc', 'escape', 'execute', 'f1', 'f10',
'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f2', 'f20',
'f21', 'f22', 'f23', 'f24', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
'final', 'fn', 'hanguel', 'hangul', 'hanja', 'help', 'home', 'insert', 'junja',
'kana', 'kanji', 'launchapp1', 'launchapp2', 'launchmail',
'launchmediaselect', 'left', 'modechange', 'multiply', 'nexttrack',
'nonconvert', 'num0', 'num1', 'num2', 'num3', 'num4', 'num5', 'num6',
'num7', 'num8', 'num9', 'numlock', 'pagedown', 'pageup', 'pause', 'pgdn',
'pgup', 'playpause', 'prevtrack', 'print', 'printscreen', 'prntscrn',
'prtsc', 'prtscr', 'return', 'right', 'scrolllock', 'select', 'separator',
'shift', 'shiftleft', 'shiftright', 'sleep', 'space', 'stop', 'subtract', 'tab',
'up', 'volumedown', 'volumemute', 'volumeup', 'win', 'winleft', 'winright', 'yen',
'command', 'option', 'optionleft', 'optionright']
KEYBOARD_KEYS = KEY_NAMES # keeping old KEYBOARD_KEYS for backwards compatibility
def isShiftCharacter(character):
"""Returns True if the key character is uppercase or shifted."""
return character.isupper() or character in '~!@#$%^&*()_+{}|:"<>?'
# The platformModule is where we reference the platform-specific functions.
if sys.platform.startswith('java'):
#from . import _pyautogui_java as platformModule
raise NotImplementedError('Jython is not yet supported by PyAutoGUI.')
elif sys.platform == 'darwin':
from . import _pyautogui_osx as platformModule
elif sys.platform == 'win32':
from . import _pyautogui_win as platformModule
from ._window_win import Window, getWindows, getWindow
else:
from . import _pyautogui_x11 as platformModule
# TODO: Having module-wide user-writable global variables is bad. It makes
# restructuring the code very difficult. For instance, what if we decide to
# move the mouse-related functions to a separate file (a submodule)? How that
# file will access this module vars? It will probably lead to a circular
# import.
# In seconds. Any duration less than this is rounded to 0.0 to instantly move
# the mouse.
MINIMUM_DURATION = 0.1
# If sleep_amount is too short, time.sleep() will be a no-op and the mouse
# cursor moves there instantly.
# TODO: This value should vary with the platform. http://stackoverflow.com/q/1133857
MINIMUM_SLEEP = 0.05
PAUSE = 0.1 # The number of seconds to pause after EVERY public function call. Useful for debugging.
FAILSAFE = True
# General Functions
# =================
def getPointOnLine(x1, y1, x2, y2, n):
"""Returns the (x, y) tuple of the point that has progressed a proportion
n along the line defined by the two x, y coordinates.
Copied from pytweening module.
"""
x = ((x2 - x1) * n) + x1
y = ((y2 - y1) * n) + y1
return (x, y)
def linear(n):
"""Trivial linear tweening function.
Copied from pytweening module.
"""
if not 0.0 <= n <= 1.0:
raise ValueError('Argument must be between 0.0 and 1.0.')
return n
def _autoPause(pause, _pause):
if _pause:
if pause is not None:
time.sleep(pause)
elif PAUSE != 0:
time.sleep(PAUSE)
def _unpackXY(x, y):
"""If x is a sequence and y is None, returns x[0], y[0]. Else, returns x, y.
On functions that receive a pair of x,y coordinates, they can be passed as
separate arguments, or as a single two-element sequence.
"""
if isinstance(x, collections.Sequence):
if len(x) == 2:
if y is None:
x, y = x
else:
raise ValueError('When passing a sequence at the x argument, the y argument must not be passed (received {0}).'.format(repr(y)))
else:
raise ValueError('The supplied sequence must have exactly 2 elements ({0} were received).'.format(len(x)))
else:
pass
return x, y
def position(x=None, y=None):
"""Returns the current xy coordinates of the mouse cursor as a two-integer
tuple.
Args:
x (int, None, optional) - If not None, this argument overrides the x in
the return value.
y (int, None, optional) - If not None, this argument overrides the y in
the return value.
Returns:
(x, y) tuple of the current xy coordinates of the mouse cursor.
"""
posx, posy = platformModule._position()
posx = int(posx)
posy = int(posy)
if x is not None:
posx = int(x)
if y is not None:
posy = int(y)
return posx, posy
def size():
"""Returns the width and height of the screen as a two-integer tuple.
Returns:
(width, height) tuple of the screen size, in pixels.
"""
return platformModule._size()
def onScreen(x, y=None):
"""Returns whether the given xy coordinates are on the screen or not.
Args:
Either the arguments are two separate values, first arg for x and second
for y, or there is a single argument of a sequence with two values, the
first x and the second y.
Example: onScreen(x, y) or onScreen([x, y])
Returns:
bool: True if the xy coordinates are on the screen at its current
resolution, otherwise False.
"""
x, y = _unpackXY(x, y)
x = int(x)
y = int(y)
width, height = platformModule._size()
return 0 <= x < width and 0 <= y < height
# Mouse Functions
# ===============
def mouseDown(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs pressing a mouse button down (but not up).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse down happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
mouse down happens. None by default.
button (str, int, optional): The mouse button pressed down. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" % button)
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position() # TODO - this isn't right. We need to check the params.
if button == 1 or str(button).lower() == 'left':
platformModule._mouseDown(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._mouseDown(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._mouseDown(x, y, 'right')
_autoPause(pause, _pause)
def mouseUp(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs releasing a mouse button up (but not down beforehand).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse up happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
mouse up happens. None by default.
button (str, int, optional): The mouse button released. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" % button)
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position()
if button == 1 or str(button).lower() == 'left':
platformModule._mouseUp(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._mouseUp(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._mouseUp(x, y, 'right')
_autoPause(pause, _pause)
def click(x=None, y=None, clicks=1, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs pressing a mouse button down and then immediately releasing it.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where
the click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
clicks (int, optional): The number of clicks to perform. 1 by default.
For example, passing 2 would do a doubleclick.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3)")
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration, tween)
x, y = platformModule._position()
for i in range(clicks):
_failSafeCheck()
if button == 1 or str(button).lower() == 'left':
platformModule._click(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._click(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._click(x, y, 'right')
else:
# These mouse buttons for hor. and vert. scrolling only apply to x11:
platformModule._click(x, y, button)
time.sleep(interval)
_autoPause(pause, _pause)
def rightClick(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a right mouse button click.
This is a wrapper function for click('right', x, y).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
click(x, y, 1, 0.0, 'right', _pause=False)
_autoPause(pause, _pause)
def middleClick(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a middle mouse button click.
    This is a wrapper function for click('middle', x, y).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
click(x, y, 1, 0.0, 'middle', _pause=False)
_autoPause(pause, _pause)
def doubleClick(x=None, y=None, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a double click.
This is a wrapper function for click('left', x, y, 2, interval).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3, 4,
5, 6, or 7
"""
_failSafeCheck()
click(x, y, 2, interval, button, _pause=False)
_autoPause(pause, _pause)
def tripleClick(x=None, y=None, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a triple click..
This is a wrapper function for click('left', x, y, 3, interval).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3, 4,
5, 6, or 7
"""
_failSafeCheck()
click(x, y, 3, interval, button, _pause=False)
_autoPause(pause, _pause)
def scroll(clicks, x=None, y=None, pause=None, _pause=True):
"""Performs a scroll of the mouse scroll wheel.
Whether this is a vertical or horizontal scroll depends on the underlying
operating system.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
clicks (int, float): The amount of scrolling to perform.
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
x, y = position(x, y)
platformModule._scroll(clicks, x, y)
_autoPause(pause, _pause)
def hscroll(clicks, x=None, y=None, pause=None, _pause=True):
"""Performs an explicitly horizontal scroll of the mouse scroll wheel,
if this is supported by the operating system. (Currently just Linux.)
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
clicks (int, float): The amount of scrolling to perform.
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
x, y = position(x, y)
platformModule._hscroll(clicks, x, y)
_autoPause(pause, _pause)
def vscroll(clicks, x=None, y=None, pause=None, _pause=True):
"""Performs an explicitly vertical scroll of the mouse scroll wheel,
if this is supported by the operating system. (Currently just Linux.)
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
clicks (int, float): The amount of scrolling to perform.
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
x, y = position(x, y)
platformModule._vscroll(clicks, x, y)
_autoPause(pause, _pause)
def moveTo(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Moves the mouse cursor to a point on the screen.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
Returns:
None
"""
x, y = _unpackXY(x, y)
_failSafeCheck()
_mouseMoveDrag('move', x, y, 0, 0, duration, tween)
_autoPause(pause, _pause)
def moveRel(xOffset=None, yOffset=None, duration=0.0, tween=linear, pause=None, _pause=True):
"""Moves the mouse cursor to a point on the screen, relative to its current
position.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
      xOffset (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset.
      yOffset (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
Returns:
None
"""
xOffset, yOffset = _unpackXY(xOffset, yOffset)
_failSafeCheck()
_mouseMoveDrag('move', None, None, xOffset, yOffset, duration, tween)
_autoPause(pause, _pause)
def dragTo(x=None, y=None, duration=0.0, tween=linear, button='left', pause=None, _pause=True):
"""Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
      x (int, float, None, tuple, optional): The x position on the screen where
        the drag ends. None by default. If tuple, this is used for x and y.
      y (int, float, None, optional): The y position on the screen where the
        drag ends. None by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
"""
_failSafeCheck()
if type(x) in (tuple, list):
x, y = x[0], x[1]
mouseDown(button=button, _pause=False)
_mouseMoveDrag('drag', x, y, 0, 0, duration, tween, button)
mouseUp(button=button, _pause=False)
_autoPause(pause, _pause)
def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True):
"""Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen, relative to its current position.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
      xOffset (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset.
      yOffset (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
"""
if xOffset is None:
xOffset = 0
if yOffset is None:
yOffset = 0
if type(xOffset) in (tuple, list):
xOffset, yOffset = xOffset[0], xOffset[1]
if xOffset == 0 and yOffset == 0:
return # no-op case
_failSafeCheck()
mousex, mousey = platformModule._position()
mouseDown(button=button, _pause=False)
_mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button)
mouseUp(button=button, _pause=False)
_autoPause(pause, _pause)
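# Illustrative example of the mouse API defined above (coordinates and values
# are arbitrary):
#   moveTo(100, 200, duration=0.25)   # glide the cursor to (100, 200)
#   doubleClick()                     # double-click at the current position
#   dragRel(50, 0, duration=0.5)      # drag 50 pixels to the right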
def _mouseMoveDrag(moveOrDrag, x, y, xOffset, yOffset, duration, tween=linear, button=None):
"""Handles the actual move or drag event, since different platforms
implement them differently.
On Windows & Linux, a drag is a normal mouse move while a mouse button is
held down. On OS X, a distinct "drag" event must be used instead.
The code for moving and dragging the mouse is similar, so this function
handles both. Users should call the moveTo() or dragTo() functions instead
of calling _mouseMoveDrag().
Args:
moveOrDrag (str): Either 'move' or 'drag', for the type of action this is.
x (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
y (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
xOffset (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
"""
# The move and drag code is similar, but OS X requires a special drag event instead of just a move event when dragging.
# See https://stackoverflow.com/a/2696107/1893164
assert moveOrDrag in ('move', 'drag'), "moveOrDrag must be in ('move', 'drag'), not %s" % (moveOrDrag)
if sys.platform != 'darwin':
moveOrDrag = 'move' # Only OS X needs the drag event specifically.
xOffset = int(xOffset) if xOffset is not None else 0
yOffset = int(yOffset) if yOffset is not None else 0
if x is None and y is None and xOffset == 0 and yOffset == 0:
return # Special case for no mouse movement at all.
startx, starty = position()
x = int(x) if x is not None else startx
y = int(y) if y is not None else starty
# x, y, xOffset, yOffset are now int.
x += xOffset
y += yOffset
width, height = size()
# Make sure x and y are within the screen bounds.
x = max(0, min(x, width - 1))
y = max(0, min(y, height - 1))
# If the duration is small enough, just move the cursor there instantly.
steps = [(x, y)]
if duration > MINIMUM_DURATION:
# Non-instant moving/dragging involves tweening:
num_steps = max(width, height)
sleep_amount = duration / num_steps
if sleep_amount < MINIMUM_SLEEP:
num_steps = int(duration / MINIMUM_SLEEP)
sleep_amount = duration / num_steps
steps = [
getPointOnLine(startx, starty, x, y, tween(n / num_steps))
for n in range(num_steps)
]
# Making sure the last position is the actual destination.
steps.append((x, y))
for tweenX, tweenY in steps:
if len(steps) > 1:
# A single step does not require tweening.
time.sleep(sleep_amount)
_failSafeCheck()
tweenX = int(round(tweenX))
tweenY = int(round(tweenY))
if moveOrDrag == 'move':
platformModule._moveTo(tweenX, tweenY)
elif moveOrDrag == 'drag':
platformModule._dragTo(tweenX, tweenY, button)
else:
raise NotImplementedError('Unknown value of moveOrDrag: {0}'.format(moveOrDrag))
_failSafeCheck()
# Keyboard Functions
# ==================
def isValidKey(key):
"""Returns a Boolean value if the given key is a valid value to pass to
PyAutoGUI's keyboard-related functions for the current platform.
This function is here because passing an invalid value to the PyAutoGUI
keyboard functions currently is a no-op that does not raise an exception.
Some keys are only valid on some platforms. For example, while 'esc' is
valid for the Escape key on all platforms, 'browserback' is only used on
Windows operating systems.
Args:
key (str): The key value.
Returns:
bool: True if key is a valid value, False if not.
"""
return platformModule.keyboardMapping.get(key, None) != None
def keyDown(key, pause=None, _pause=True):
"""Performs a keyboard key press without the release. This will put that
key in a held down state.
NOTE: For some reason, this does not seem to cause key repeats like would
happen if a keyboard key was held down on a text field.
Args:
key (str): The key to be pressed down. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
"""
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyDown(key)
_autoPause(pause, _pause)
def keyUp(key, pause=None, _pause=True):
"""Performs a keyboard key release (without the press down beforehand).
Args:
key (str): The key to be released up. The valid names are listed in
KEYBOARD_KEYS.
Returns:
None
"""
if len(key) > 1:
key = key.lower()
_failSafeCheck()
platformModule._keyUp(key)
_autoPause(pause, _pause)
def press(keys, presses=1, interval=0.0, pause=None, _pause=True):
"""Performs a keyboard key press down, followed by a release.
Args:
key (str, list): The key to be pressed. The valid names are listed in
KEYBOARD_KEYS. Can also be a list of such strings.
      presses (integer, optional): the number of press repetitions
1 by default, for just one press
interval (float, optional): How many seconds between each press.
0.0 by default, for no pause between presses.
pause (float, optional): How many seconds in the end of function process.
None by default, for no pause in the end of function process.
Returns:
None
"""
    if type(keys) == str:
        keys = [keys] # put string in a list
    else:
        lowerKeys = []
        for s in keys:
            if len(s) > 1:
                lowerKeys.append(s.lower())
            else:
                lowerKeys.append(s)
        keys = lowerKeys # use the lowercased key names
interval = float(interval)
for i in range(presses):
for k in keys:
_failSafeCheck()
platformModule._keyDown(k)
platformModule._keyUp(k)
time.sleep(interval)
_autoPause(pause, _pause)
def typewrite(message, interval=0.0, pause=None, _pause=True):
"""Performs a keyboard key press down, followed by a release, for each of
the characters in message.
The message argument can also be list of strings, in which case any valid
keyboard name can be used.
Since this performs a sequence of keyboard presses and does not hold down
keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()
function for that.
Args:
message (str, list): If a string, then the characters to be pressed. If a
list, then the key names of the keys to press in order. The valid names
are listed in KEYBOARD_KEYS.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(interval)
_failSafeCheck()
for c in message:
if len(c) > 1:
c = c.lower()
press(c, _pause=False)
time.sleep(interval)
_failSafeCheck()
_autoPause(pause, _pause)
def hotkey(*args, **kwargs):
"""Performs key down presses on the arguments passed in order, then performs
key releases in reverse order.
The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a
"Ctrl-Shift-C" hotkey/keyboard shortcut press.
Args:
key(s) (str): The series of keys to press, in order. This can also be a
list of key strings to press.
interval (float, optional): The number of seconds in between each press.
0.0 by default, for no pause in between presses.
Returns:
None
"""
interval = float(kwargs.get('interval', 0.0))
_failSafeCheck()
for c in args:
if len(c) > 1:
c = c.lower()
platformModule._keyDown(c)
time.sleep(interval)
for c in reversed(args):
if len(c) > 1:
c = c.lower()
platformModule._keyUp(c)
time.sleep(interval)
_autoPause(kwargs.get('pause', None), kwargs.get('_pause', True))
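# Illustrative example of the keyboard API defined above (keys are arbitrary):
#   typewrite('Hello world!', interval=0.05)  # one press/release per character
#   hotkey('ctrl', 'shift', 'esc')            # modifiers held across the sequence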
class FailSafeException(Exception):
pass
def _failSafeCheck():
if FAILSAFE and position() == (0, 0):
raise FailSafeException('PyAutoGUI fail-safe triggered from mouse moving to upper-left corner. To disable this fail-safe, set pyautogui.FAILSAFE to False.')
def displayMousePosition(xOffset=0, yOffset=0):
"""This function is meant to be run from the command line. It will
automatically display the location and RGB of the mouse cursor."""
print('Press Ctrl-C to quit.')
if xOffset != 0 or yOffset != 0:
print('xOffset: %s yOffset: %s' % (xOffset, yOffset))
resolution = size()
try:
while True:
# Get and print the mouse coordinates.
x, y = position()
positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4)
if (x - xOffset) < 0 or (y - yOffset) < 0 or (x - xOffset) >= resolution[0] or (y - yOffset) >= resolution[1]:
pixelColor = ('NaN', 'NaN', 'NaN')
else:
pixelColor = pyscreeze.screenshot().getpixel((x, y))
positionStr += ' RGB: (' + str(pixelColor[0]).rjust(3)
positionStr += ', ' + str(pixelColor[1]).rjust(3)
positionStr += ', ' + str(pixelColor[2]).rjust(3) + ')'
sys.stdout.write(positionStr)
sys.stdout.write('\b' * len(positionStr))
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('\n')
sys.stdout.flush()
| {
"content_hash": "86e189a5ce5a99d59eb7b6ea613fe7b5",
"timestamp": "",
"source": "github",
"line_count": 1033,
"max_line_length": 164,
"avg_line_length": 36.594385285575996,
"alnum_prop": 0.6381408391090418,
"repo_name": "RPing/input-paste",
"id": "9ab1d735b0583fb6de252c7635168079365a5199",
"size": "37950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyautogui/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "235"
},
{
"name": "Python",
"bytes": "56465"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import time
from subprocess import Popen, PIPE
__doc__ = """
pfa2txt v2.0 - Feb 03 2016
This script takes a path to a folder as input, finds all the Type 1 fonts
(.pfa files) inside that folder and its subdirectories, and converts them
to plain text Type 1 fonts (.txt files; the Private and CharStrings
dictionaries are not encrypted). If a path is not provided, the script will
use the current path as the top-most directory.
==================================================
Versions:
v1.0 - Apr 08 2013 - Initial release
v2.0 - Feb 03 2016 - Modernized.
"""
def getFontPaths(path):
fontsList = []
for r, folders, files in os.walk(path):
for file in files:
fileName, extension = os.path.splitext(file)
extension = extension.lower()
if extension == ".pfa":
fontsList.append(os.path.join(r, file))
return fontsList
def doTask(fonts):
totalFonts = len(fonts)
print("%d fonts found" % totalFonts)
i = 1
for font in fonts:
folderPath, fontFileName = os.path.split(font)
styleName = os.path.basename(folderPath)
# Change current directory to the folder where the font is contained
os.chdir(folderPath)
print('Converting %s...(%d/%d)' % (styleName, i, totalFonts))
# Assemble TXT & PFA file names
fileNameNoExtension, fileExtension = os.path.splitext(fontFileName)
pfaPath = fileNameNoExtension + '.pfa'
txtPath = fileNameNoExtension + '.txt'
# Convert PFA to TXT using detype1
cmd = 'detype1 "%s" > "%s"' % (pfaPath, txtPath)
popen = Popen(cmd, shell=True, stdout=PIPE)
popenout, popenerr = popen.communicate()
if popenout:
print(popenout)
if popenerr:
print(popenerr)
# Delete PFA font
if os.path.exists(pfaPath):
os.remove(pfaPath)
i += 1
def run():
# if a path is provided
if len(sys.argv[1:]):
baseFolderPath = sys.argv[1]
if baseFolderPath[-1] == '/': # remove last slash if present
baseFolderPath = baseFolderPath[:-1]
# make sure the path is valid
if not os.path.isdir(baseFolderPath):
print('Invalid directory.')
return
# if a path is not provided, use the current directory
else:
baseFolderPath = os.getcwd()
t1 = time.time()
fontsList = getFontPaths(os.path.abspath(baseFolderPath))
if len(fontsList):
doTask(fontsList)
else:
print("No fonts found.")
return
t2 = time.time()
elapsedSeconds = t2 - t1
elapsedMinutes = elapsedSeconds / 60
if elapsedMinutes < 1:
print('Completed in %.1f seconds.' % elapsedSeconds)
else:
print('Completed in %.1f minutes.' % elapsedMinutes)
if __name__ == '__main__':
run()
| {
"content_hash": "ea60fc40f31df66d658f19ae08e8396f",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 76,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.6068027210884354,
"repo_name": "adobe-type-tools/python-scripts",
"id": "4336fedebcda6befb9bcb45d52d64f1b5123d9d4",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pfa2txt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267212"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from vespa.utils import get_station_coordinates
def fk_analysis(st, smax, fmin, fmax, tmin, tmax, stat='power'):
'''
Performs frequency-wavenumber space (FK) analysis on a stream of time series data for a given slowness range, frequency band and time window.
For an input stream of length K, the output is a K x K array with values of the chosen statistic calculated on a slowness grid in the x and y spatial dimensions. This statistic can be one of:-
* 'power' - the power in frequency-domain stack
* 'semblance' - the semblance calculated in the frequency domain
* 'F' - the F-statistic calculated in the frequency domain
Before the FK analysis is performed, the seismograms are cut to a time window between tmin and tmax, and the data is bandpass-filtered between frequencies fmin and fmax.
Parameters
----------
st : ObsPy Stream object
Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
smax : float
Maximum magnitude of slowness, used for constructing the slowness grid in both x and y directions
fmin : float
Lower end of frequency band to perform FK analysis over
fmax : float
Upper end of frequency band to perform FK analysis over
    tmin : UTCDateTime
Start of time window, seismograms are cut between tmin and tmax before FK starts
    tmax : UTCDateTime
End of time window, seismograms are cut between tmin and tmax before FK starts
stat : string
Statistic that is to be calculated over the slowness grid, either 'power', 'semblance', or 'F'
Returns
-------
fk : NumPy array
The chosen statistic calculated at each point in a K x K grid of slowness values in the x and y directions
'''
assert (stat == 'power' or stat == 'semblance' or stat == 'F'), "Argument 'stat' must be one of 'power', 'semblance' or 'F'"
st = st.copy().trim(starttime=tmin, endtime=tmax)
# Retrieve metadata: time step and number of stations to be stacked
delta = st[0].stats.delta
nbeam = len(st)
# Pre-process, and filter to frequency window
st.detrend()
st.taper(type='cosine', max_percentage=0.05)
st = st.copy().filter("bandpass", freqmin=fmin, freqmax=fmax)
npts = st[0].stats.npts
    # Compute the Fourier transform of each trace
fft_st = np.zeros((nbeam, (npts / 2) + 1), dtype=complex) # Length of real FFT is only half that of time series data
for i, tr in enumerate(st):
fft_st[i, :] = np.fft.rfft(tr.data) # Only need positive frequencies, so use rfft
freqs = np.fft.fftfreq(npts, delta)[0:(npts / 2) + 1]
# Slowness grid
slow_x = np.linspace(-smax, smax, nbeam)
slow_y = np.linspace(-smax, smax, nbeam)
# Array geometry
x, y = np.split(get_station_coordinates(st)[:, :2], 2, axis=1)
# convert to km
x /= 1000.
y /= 1000.
# Calculate the F-K spectrum
fk = np.zeros((nbeam, nbeam))
for ii in range(nbeam):
for jj in range(nbeam):
dt = slow_x[jj] * x + slow_y[ii] * y
beam = np.sum(np.exp(-1j * 2 * np.pi * np.outer(dt, freqs)) * fft_st / nbeam, axis=0)
fk[ii, jj] = np.vdot(beam, beam).real
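    # Optionally normalise the raw beam power: semblance scales it by the total
    # power summed over the individual traces, while the F statistic compares
    # beam power against the residual (unmodelled) power.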
if stat == 'semblance' or stat == 'F':
tracepower = np.vdot(fft_st, fft_st).real
if stat == 'semblance':
fk_semb = nbeam * fk / tracepower
return fk_semb
elif stat == 'F':
fk_F = (nbeam - 1)* nbeam * fk / (tracepower - nbeam * fk)
return fk_F
else:
return fk
def fk_slowness_vector(st, smax, fmin, fmax, tmin, tmax, stat='power'):
'''
Returns the slowness vector (amplitude and back azimuth) for time series data from a seismic array, as calculated using FK-beamforming.
Performs frequency-wavenumber space (FK) analysis on a stream of time series data for a given slowness range, frequency band and time window.
The output is a tuple containing the scalar slowness and backazimuth of the incoming wave, determined using a grid search to maximise the chosen beamforming statistic. This can be one of:-
* 'power' - the power in frequency-domain stack
    * 'semblance' - the semblance calculated in the frequency domain
* 'F' - the F-statistic calculated in the frequency domain
Before the FK analysis is performed, the seismograms are cut to a time window between tmin and tmax, and the data is bandpass-filtered between frequencies fmin and fmax.
Parameters
----------
st : ObsPy Stream object
Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
smax : float
Maximum magnitude of slowness, used for constructing the slowness grid in both x and y directions
fmin : float
Lower end of frequency band to perform FK analysis over
fmax : float
Upper end of frequency band to perform FK analysis over
    tmin : UTCDateTime
Start of time window, seismograms are cut between tmin and tmax before FK starts
    tmax : UTCDateTime
End of time window, seismograms are cut between tmin and tmax before FK starts
stat : string
Statistic that is to be calculated over the slowness grid, either 'power', 'semblance', or 'F'
Returns
-------
slowness : float
The scalar magnitude, in s/km, of the slowness of the incident seismic wave, as determined by the FK analysis
backazimuth: float
The backazimuth, in degrees, from North back to the epicentre of the incident seismic wave, as determined by the FK analysis
'''
nbeam = len(st)
fk = fk_analysis(st, smax, fmin, fmax, tmin, tmax, stat)
# Find maximum
fkmax = np.unravel_index(np.argmax(fk), (nbeam, nbeam))
# Slowness ranges
slow_x = np.linspace(-smax, smax, nbeam)
slow_y = np.linspace(-smax, smax, nbeam)
slow_x_max = slow_x[fkmax[1]]
slow_y_max = slow_y[fkmax[0]]
slowness = np.hypot(slow_x_max, slow_y_max)
backazimuth = np.degrees(np.arctan2(slow_x_max, slow_y_max))
if backazimuth < 0:
backazimuth += 360. # For backazimuths in range 0 - 360 deg
return (slowness, backazimuth)
def fk_plot(st, smax, fmin, fmax, tmin, tmax, stat='power'):
'''
Plots the results of FK analysis on a stream of time series data from a seismic array.
Performs frequency-wavenumber space (FK) analysis on a stream of time series data for a given slowness range, frequency band and time window.
This function plots the chosen statistic on a slowness grid in the x and y directions. The statistic can be one of:-
* 'power' - the power in frequency-domain stack
    * 'semblance' - the semblance calculated in the frequency domain
* 'F' - the F-statistic calculated in the frequency domain
    The title of the plot also contains the slowness and backazimuth for which the chosen statistic is maximised.
Before the FK analysis is performed, the seismograms are cut to a time window between tmin and tmax, and the data is bandpass-filtered between frequencies fmin and fmax.
Parameters
----------
st : ObsPy Stream object
Stream of SAC format seismograms for the seismic array, length K = no. of stations in array
smax : float
Maximum magnitude of slowness, used for constructing the slowness grid in both x and y directions
fmin : float
Lower end of frequency band to perform FK analysis over
fmax : float
Upper end of frequency band to perform FK analysis over
    tmin : UTCDateTime
Start of time window, seismograms are cut between tmin and tmax before FK starts
    tmax : UTCDateTime
End of time window, seismograms are cut between tmin and tmax before FK starts
stat : string
Statistic that is to be calculated over the slowness grid, either 'power', 'semblance', or 'F'
'''
nbeam = len(st)
fk = fk_analysis(st, smax, fmin, fmax, tmin, tmax, stat)
# Slowness ranges
slow_x = np.linspace(-smax, smax, nbeam)
slow_y = np.linspace(-smax, smax, nbeam)
# Find maximum
fkmax = np.unravel_index(np.argmax(fk), (nbeam, nbeam))
slow_x_max = slow_x[fkmax[1]]
slow_y_max = slow_y[fkmax[0]]
slowness = np.hypot(slow_x_max, slow_y_max)
backazimuth = np.degrees(np.arctan2(slow_x_max, slow_y_max))
if backazimuth < 0:
backazimuth += 360.
fig = plt.figure(figsize=(16, 14))
fig.add_axes([0.5,0.5,0.45,0.45])
plt.contourf(slow_x, slow_y, fk, 16)
plt.grid('on', linestyle='-')
plt.xlabel('slowness east (s/km)')
plt.ylabel('slowness north (s/km)')
cb = plt.colorbar()
cb.set_label(stat)
plt.xlim(-smax, smax);
plt.ylim(-smax, smax);
plt.title("FK Analysis, slowness= " + '%.4f' % slowness + " s/km, backazimuth= " + '%.1f' % backazimuth + " deg")
plt.show()
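# Minimal usage sketch (illustrative only; the file paths, time window and
# slowness bound below are assumptions, not part of this module):
#
#   from obspy import read, UTCDateTime
#   st = read("array_data/*.SAC")
#   t0 = UTCDateTime("2015-01-01T00:10:00")
#   slowness, baz = fk_slowness_vector(st, smax=0.3, fmin=0.5, fmax=2.0,
#                                      tmin=t0, tmax=t0 + 60.0, stat='power')
#   fk_plot(st, smax=0.3, fmin=0.5, fmax=2.0, tmin=t0, tmax=t0 + 60.0, stat='F')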
| {
"content_hash": "0d3f0c11c7e694ed0172f17f249ba59c",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 196,
"avg_line_length": 41.148648648648646,
"alnum_prop": 0.647400109469075,
"repo_name": "NeilWilkins/vespa",
"id": "c4ffa8c52b591879282fb80fdb84244d879fe4d3",
"size": "9187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vespa/fk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50047"
}
],
"symlink_target": ""
} |