repo_name | repo_path | repo_head_hexsha | content | apis
---|---|---|---|---|
GonzaloAlvarez/py-ga-sysadmin | lib/fmdplugins/list_records.py | fbbbbcad36df9f1b3e40328ff48c22bad13a56f4 | from lib.fmd.namedentity import NamedEntity
from lib.fmd.decorators import Action, ListStage, GetStage
from lib.exceptions.workflow import EntryException
@Action(ListStage.DATAGATHERING)
def list_records(context, output):
output = []
if hasattr(context, 'filter'):
context.log.debug('Using filter [%s]' % context.filter)
entries = context.ddb.list(context.filter)
else:
entries = context.ddb.list()
return NamedEntity('records', entries)
| [((5, 1, 5, 32), 'lib.fmd.decorators.Action', 'Action', ({(5, 8, 5, 31): 'ListStage.DATAGATHERING'}, {}), '(ListStage.DATAGATHERING)', False, 'from lib.fmd.decorators import Action, ListStage, GetStage\n'), ((15, 11, 15, 42), 'lib.fmd.namedentity.NamedEntity', 'NamedEntity', ({(15, 23, 15, 32): '"""records"""', (15, 34, 15, 41): 'entries'}, {}), "('records', entries)", False, 'from lib.fmd.namedentity import NamedEntity\n')] |
zetahernandez/pysoa | pysoa/server/action/switched.py | 006e55ba877196a42c64f2ff453583d366082d55 | from __future__ import (
absolute_import,
unicode_literals,
)
import abc
import six
from pysoa.server.internal.types import is_switch
__all__ = (
'SwitchedAction',
)
def _len(item):
# Safe length that won't raise an error on values that don't support length
return getattr(item, '__len__', lambda *_: -1)()
class _DefaultAction(object):
def __int__(self):
d = id(self)
return d if d < 0 else -d
def __eq__(self, other):
return getattr(other, '__class__', None) == _DefaultAction
class _SwitchedActionMetaClass(abc.ABCMeta):
def __new__(mcs, name, bases, body):
"""
Validate the switch_to_action_map when the class is created, instead of doing it every time the class
is instantiated. This identifies problems earlier (on import) and improves performance by not performing this
validation every time the action is called.
"""
cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body)
# noinspection PyUnresolvedReferences
if bases[0] is not object and (
not cls.switch_to_action_map or
not hasattr(cls.switch_to_action_map, '__iter__') or
_len(cls.switch_to_action_map) < 2 or
any(
True for i in cls.switch_to_action_map
if not hasattr(i, '__getitem__') or _len(i) != 2 or not is_switch(i[0]) or not callable(i[1])
)
):
raise ValueError(
'Class attribute switch_to_action_map must be an iterable of at least two indexable items, each '
'with exactly two indexes, where the first element is a switch and the second element is an action '
'(callable).'
)
return cls
@six.add_metaclass(_SwitchedActionMetaClass)
class SwitchedAction(object):
"""
A specialized action that defers to other, concrete actions based on request switches. Subclasses must not
override any methods and must override `switch_to_action_map`. `switch_to_action_map` should be some iterable
object that provides `__len__` (such as a tuple [recommended] or list). Its items must be indexable objects that
provide `__len__` (such as a tuple [recommended] or list) and have exactly two elements.
For each item in `switch_to_action_map`, the first element must be a switch that provides `__int__` (such as an
actual integer) or a switch that provides an attribute `value` which, itself, provides `__int__` (or is an int).
The second element must be an action, such as an action class (e.g. one that extends `Action`) or any callable
that accepts a server settings object and returns a new callable that, itself, accepts an `ActionRequest` object
and returns an `ActionResponse` object or raises an `ActionError`.
`switch_to_action_map` must have at least two items in it. `SwitchedAction` will iterate over that list, checking
the first element (switch) of each item to see if it is enabled in the request. If it is, the second element (the
action) of that item will be deferred to. If it finds no items whose switches are enabled, it will use the very
last action in `switch_to_action_map`. As such, you can treat the last item as a default, and its switch could
simply be `SwitchedAction.DEFAULT_ACTION` (although, this is not required: it could also be a valid switch, and
it would still be treated as the default in the case that no other items matched).
Example usage:
.. code-block:: python
class UserActionV1(Action):
...
class UserActionV2(Action):
...
class UserTransitionAction(SwitchedAction):
switch_to_action_map = (
(USER_VERSION_2_ENABLED, UserActionV2),
(SwitchedAction.DEFAULT_ACTION, UserActionV1),
)
"""
DEFAULT_ACTION = _DefaultAction()
switch_to_action_map = ()
def __init__(self, settings=None):
"""
Construct a new action. Concrete classes should not override this.
:param settings: The server settings object
:type settings: dict
"""
if self.__class__ is SwitchedAction:
raise TypeError('Cannot instantiate abstract SwitchedAction')
self.settings = settings
def get_uninitialized_action(self, action_request):
"""
Get the raw action (such as the action class or the base action callable) without instantiating/calling
it, based on the switches in the action request, or the default raw action if no switches were present or
no switches matched.
:param action_request: The request object
:type action_request: EnrichedActionRequest
:return: The action
:rtype: callable
"""
last_action = None
matched_action = None
default_action = None
for switch, action in self.switch_to_action_map:
if switch == self.DEFAULT_ACTION:
default_action = action
elif switch and action_request.switches.is_active(switch):
matched_action = action
break
else:
last_action = action
return matched_action or default_action or last_action
def __call__(self, action_request):
"""
Main entry point for actions from the `Server` (or potentially from tests). Finds the appropriate real action
to invoke based on the switches enabled in the request, initializes the action with the server settings, and
then calls the action with the request object, returning its response directly.
:param action_request: The request object
:type action_request: EnrichedActionRequest
:return: The response object
:rtype: ActionResponse
:raise: ActionError, ResponseValidationError
"""
return self.get_uninitialized_action(action_request)(self.settings)(action_request)
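# A minimal dispatch sketch for the `UserTransitionAction` defined in the class
# docstring above; `settings` and `request` are assumed to be a valid server
# settings dict and an EnrichedActionRequest and are not part of this module:
#
#     action = UserTransitionAction(settings)
#     response = action(request)
#     # With USER_VERSION_2_ENABLED active on the request, UserActionV2 handles it;
#     # otherwise the DEFAULT_ACTION entry falls through to UserActionV1.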
| [((60, 1, 60, 44), 'six.add_metaclass', 'six.add_metaclass', ({(60, 19, 60, 43): '_SwitchedActionMetaClass'}, {}), '(_SwitchedActionMetaClass)', False, 'import six\n'), ((48, 72, 48, 87), 'pysoa.server.internal.types.is_switch', 'is_switch', ({(48, 82, 48, 86): 'i[0]'}, {}), '(i[0])', False, 'from pysoa.server.internal.types import is_switch\n')] |
WebarchivCZ/Seeder | Seeder/settings/tests.py | 1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8 | from .base import *
SECRET_KEY = 'test'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sqlite3.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
},
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
} | [] |
odeke-em/resty | blobStore.py | 838934033e7eeca521e8c6d8cb2e99778beaa4b9 | #!/usr/bin/env python3
# Author: Emmanuel Odeke <[email protected]>
# This example steps you through using resty & restAssured to save pickled/serialized
# data as a blob and then later re-using it after deserialization.
# Sample usage might be in collaborative computing, i.e. publishing results from an expensive
# computation on one machine so that other machines can load it as live data.
def testSerializer():
import Serializer
bs = Serializer.BinarySerializer()
js = Serializer.JSONSerializer()
data = dict((i, i) for i in range(10))
bserial = bs.serialize(data)
jserial = js.serialize(data)
bdserial = bs.deserialize(bserial)
jdserial = js.deserialize(jserial)
print('bdserial', bdserial)
ioS = bs.ioStream(bserial)
ioR = ioS.read()
print('ioS data from the stream', ioR)
def testCloudPassagePickledVersion():
from entrails.cloudPassage import CloudPassageHandler
cc = CloudPassageHandler()
data = dict((i, i*10) for i in range(9))
title = 'Dict of items 0-8999, keys i*10'
res = cc.push(data, title=title, asPickle=True)
pulledObj = cc.pull(metaData='pickle')
print('PulledObj', pulledObj, data)
assert(pulledObj == data)
rmTry = cc.removeTrace(data, asPickle=True)
print(rmTry)
def testCloudPassageJSONVersion():
from entrails.cloudPassage import CloudPassageHandler
cc = CloudPassageHandler()
data = dict((str(i), i*10) for i in range(9))
title = 'Dict of items 0-8999, keys i*10'
res = cc.push(data, title=title, asPickle=False)
pulledObj = cc.pull(metaData='json')
print('PulledObj', pulledObj, data)
assert(pulledObj == data)
rmTry = cc.removeTrace(data)
print(rmTry)
def main():
testSerializer()
testCloudPassageJSONVersion()
testCloudPassagePickledVersion()
if __name__ == '__main__':
main()
| [((11, 9, 11, 38), 'Serializer.BinarySerializer', 'Serializer.BinarySerializer', ({}, {}), '()', False, 'import Serializer\n'), ((12, 9, 12, 36), 'Serializer.JSONSerializer', 'Serializer.JSONSerializer', ({}, {}), '()', False, 'import Serializer\n'), ((27, 9, 27, 30), 'entrails.cloudPassage.CloudPassageHandler', 'CloudPassageHandler', ({}, {}), '()', False, 'from entrails.cloudPassage import CloudPassageHandler\n'), ((41, 9, 41, 30), 'entrails.cloudPassage.CloudPassageHandler', 'CloudPassageHandler', ({}, {}), '()', False, 'from entrails.cloudPassage import CloudPassageHandler\n')] |
kavanAdeshara/Expense_Tracker | venv/Lib/site-packages/dataframe/_dataframe_column_set.py | b3e4810e858a7786e05cda6b91ba674b73b87981 | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = '[email protected]'
from itertools import chain
import tabulate
from ._dataframe_column import DataFrameColumn
from ._dataframe_row import DataFrameRow
class DataFrameColumnSet:
def __init__(self, **kwargs):
self.__data_columns = []
self.__nrow = -1
self.cbind(**kwargs)
def __getitem__(self, item):
if isinstance(item, int):
return self.__data_columns[item]
raise ValueError("Item should be integer!")
def __iter__(self):
for col in self.__data_columns:
yield col
def __str__(self):
stri = "\nA dataframe"
ta = []
for col in self.__data_columns:
vals = col.values
if len(vals) > 10:
vals = list(chain(vals[:3], "...", vals[-3:]))
ta.append(vals)
ta = tabulate.tabulate(zip(*ta), headers=self.colnames)
return stri + "\n\n" + ta.__str__()
@property
def nrow(self):
return self.__nrow
@property
def ncol(self):
return len(self.colnames)
@property
def colnames(self):
return [x.colname for x in self.__data_columns]
def rows(self, idxs):
return [self.row(i) for i in idxs]
def row(self, idx):
"""
Returns DataFrameRow of the DataFrame given its index.
:param idx: the index of the row in the DataFrame.
:return: returns a DataFrameRow
"""
return DataFrameRow(idx, [x[idx] for x in self], self.colnames)
def which_colnames(self, *args):
idx = []
for i in range(len(self.__data_columns)):
if self.colnames[i] in args:
idx.append(i)
return idx
def cbind(self, **columns):
keys = sorted([x for x in columns.keys()])
for k in keys:
self.__cbind(DataFrameColumn(str(k), columns.get(k)))
def __cbind(self, column):
if column.colname in self.colnames:
ValueError("Appending duplicate col-name!")
self.__data_columns.append(column)
self.__nrow = self.__data_columns[-1].size()
for col in self.__data_columns:
if col.size() != self.__nrow:
raise ValueError("Columns do not have equal lengths!")
| [((52, 28, 52, 61), 'itertools.chain', 'chain', ({(52, 34, 52, 42): 'vals[:3]', (52, 44, 52, 49): '"""..."""', (52, 51, 52, 60): 'vals[-3:]'}, {}), "(vals[:3], '...', vals[-3:])", False, 'from itertools import chain\n')] |
Springworks/rules_docker | java/image.bzl | b943cd1fe3bf1c6c5fdac1889e952408599cffff | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A rule for creating a Java container image.
The signature of java_image is compatible with java_binary.
The signature of war_image is compatible with java_library.
"""
load(
"//container:container.bzl",
"container_pull",
_repositories = "repositories",
)
# Load the resolved digests.
load(
":java.bzl",
_JAVA_DIGESTS = "DIGESTS",
)
load(
":jetty.bzl",
_JETTY_DIGESTS = "DIGESTS",
)
def repositories():
# Call the core "repositories" function to reduce boilerplate.
# This is idempotent if folks call it themselves.
_repositories()
excludes = native.existing_rules().keys()
if "java_image_base" not in excludes:
container_pull(
name = "java_image_base",
registry = "gcr.io",
repository = "distroless/java",
digest = _JAVA_DIGESTS["latest"],
)
if "java_debug_image_base" not in excludes:
container_pull(
name = "java_debug_image_base",
registry = "gcr.io",
repository = "distroless/java",
digest = _JAVA_DIGESTS["debug"],
)
if "jetty_image_base" not in excludes:
container_pull(
name = "jetty_image_base",
registry = "gcr.io",
repository = "distroless/java/jetty",
digest = _JETTY_DIGESTS["latest"],
)
if "jetty_debug_image_base" not in excludes:
container_pull(
name = "jetty_debug_image_base",
registry = "gcr.io",
repository = "distroless/java/jetty",
digest = _JETTY_DIGESTS["debug"],
)
if "servlet_api" not in excludes:
native.maven_jar(
name = "javax_servlet_api",
artifact = "javax.servlet:javax.servlet-api:3.0.1",
)
DEFAULT_JAVA_BASE = select({
"@io_bazel_rules_docker//:fastbuild": "@java_image_base//image",
"@io_bazel_rules_docker//:debug": "@java_debug_image_base//image",
"@io_bazel_rules_docker//:optimized": "@java_image_base//image",
"//conditions:default": "@java_image_base//image",
})
DEFAULT_JETTY_BASE = select({
"@io_bazel_rules_docker//:fastbuild": "@jetty_image_base//image",
"@io_bazel_rules_docker//:debug": "@jetty_debug_image_base//image",
"@io_bazel_rules_docker//:optimized": "@jetty_image_base//image",
"//conditions:default": "@jetty_image_base//image",
})
load(
"//container:container.bzl",
_container = "container",
)
def java_files(f):
files = []
if java_common.provider in f:
java_provider = f[java_common.provider]
files += list(java_provider.transitive_runtime_jars)
if hasattr(f, "files"): # a jar file
files += list(f.files)
return files
load(
"//lang:image.bzl",
"dep_layer_impl",
"layer_file_path",
)
def _jar_dep_layer_impl(ctx):
"""Appends a layer for a single dependency's runfiles."""
return dep_layer_impl(ctx, runfiles = java_files)
jar_dep_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
# The dependency whose runfiles we're appending.
"dep": attr.label(mandatory = True),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Override the defaults.
"directory": attr.string(default = "/app"),
# https://github.com/bazelbuild/bazel/issues/2176
"data_path": attr.string(default = "."),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _jar_dep_layer_impl,
)
def _jar_app_layer_impl(ctx):
"""Appends the app layer with all remaining runfiles."""
available = depset()
for jar in ctx.attr.jar_layers:
available += java_files(jar)
# We compute the set of unavailable stuff by walking deps
# in the same way, adding in our binary and then subtracting
# out what it available.
unavailable = depset()
for jar in ctx.attr.deps + ctx.attr.runtime_deps:
unavailable += java_files(jar)
unavailable += java_files(ctx.attr.binary)
unavailable = [x for x in unavailable if x not in available]
classpath = ":".join([
layer_file_path(ctx, x)
for x in available + unavailable
])
# Classpaths can grow long and there is a limit on the length of a
# command line, so mitigate this by always writing the classpath out
# to a file instead.
classpath_file = ctx.new_file(ctx.attr.name + ".classpath")
ctx.actions.write(classpath_file, classpath)
binary_path = layer_file_path(ctx, ctx.files.binary[0])
classpath_path = layer_file_path(ctx, classpath_file)
entrypoint = [
"/usr/bin/java",
"-cp",
# Support optionally passing the classpath as a file.
"@" + classpath_path if ctx.attr._classpath_as_file else classpath,
] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args
file_map = {
layer_file_path(ctx, f): f
for f in unavailable + [classpath_file]
}
return _container.image.implementation(
ctx,
# We use all absolute paths.
directory = "/",
file_map = file_map,
entrypoint = entrypoint,
)
jar_app_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The binary target for which we are synthesizing an image.
"binary": attr.label(mandatory = True),
# The full list of dependencies that have their own layers
# factored into our base.
"jar_layers": attr.label_list(),
# The rest of the dependencies.
"deps": attr.label_list(),
"runtime_deps": attr.label_list(),
"jvm_flags": attr.string_list(),
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
# The main class to invoke on startup.
"main_class": attr.string(mandatory = True),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Whether the classpath should be passed as a file.
"_classpath_as_file": attr.bool(default = False),
# Override the defaults.
"directory": attr.string(default = "/app"),
# https://github.com/bazelbuild/bazel/issues/2176
"data_path": attr.string(default = "."),
"legacy_run_behavior": attr.bool(default = False),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _jar_app_layer_impl,
)
def java_image(
name,
base = None,
main_class = None,
deps = [],
runtime_deps = [],
layers = [],
jvm_flags = [],
**kwargs):
"""Builds a container image overlaying the java_binary.
Args:
layers: Augments "deps" with dependencies that should be put into
their own layers.
**kwargs: See java_binary.
"""
binary_name = name + ".binary"
native.java_binary(
name = binary_name,
main_class = main_class,
# If the rule is turning a JAR built with java_library into
# a binary, then it will appear in runtime_deps. We are
# not allowed to pass deps (even []) if there is no srcs
# kwarg.
deps = (deps + layers) or None,
runtime_deps = runtime_deps,
jvm_flags = jvm_flags,
**kwargs
)
base = base or DEFAULT_JAVA_BASE
for index, dep in enumerate(layers):
this_name = "%s.%d" % (name, index)
jar_dep_layer(name = this_name, base = base, dep = dep)
base = this_name
visibility = kwargs.get("visibility", None)
jar_app_layer(
name = name,
base = base,
binary = binary_name,
main_class = main_class,
jvm_flags = jvm_flags,
deps = deps,
runtime_deps = runtime_deps,
jar_layers = layers,
visibility = visibility,
args = kwargs.get("args"),
)
def _war_dep_layer_impl(ctx):
"""Appends a layer for a single dependency's runfiles."""
# TODO(mattmoor): Today we run the risk of filenames colliding when
# they get flattened. Instead of just flattening and using basename
# we should use a file_map based scheme.
return _container.image.implementation(
ctx,
files = java_files(ctx.attr.dep),
)
_war_dep_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
# The dependency whose runfiles we're appending.
"dep": attr.label(mandatory = True),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Override the defaults.
"directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"),
# WE WANT PATHS FLATTENED
# "data_path": attr.string(default = "."),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _war_dep_layer_impl,
)
def _war_app_layer_impl(ctx):
"""Appends the app layer with all remaining runfiles."""
available = depset()
for jar in ctx.attr.jar_layers:
available += java_files(jar)
# This is based on rules_appengine's WAR rules.
transitive_deps = depset()
transitive_deps += java_files(ctx.attr.library)
# TODO(mattmoor): Handle data files.
# If we start putting libs in servlet-agnostic paths,
# then consider adding symlinks here.
files = [d for d in transitive_deps if d not in available]
return _container.image.implementation(ctx, files = files)
_war_app_layer = rule(
attrs = dict(_container.image.attrs.items() + {
# The library target for which we are synthesizing an image.
"library": attr.label(mandatory = True),
# The full list of dependencies that have their own layers
# factored into our base.
"jar_layers": attr.label_list(),
# The base image on which to overlay the dependency layers.
"base": attr.label(mandatory = True),
"entrypoint": attr.string_list(default = []),
# Whether to lay out each dependency in a manner that is agnostic
# of the binary in which it is participating. This can increase
# sharing of the dependency's layer across images, but requires a
# symlink forest in the app layers.
"agnostic_dep_layout": attr.bool(default = True),
# Override the defaults.
"directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"),
# WE WANT PATHS FLATTENED
# "data_path": attr.string(default = "."),
"legacy_run_behavior": attr.bool(default = False),
}.items()),
executable = True,
outputs = _container.image.outputs,
implementation = _war_app_layer_impl,
)
def war_image(name, base = None, deps = [], layers = [], **kwargs):
"""Builds a container image overlaying the java_library as an exploded WAR.
TODO(mattmoor): For `bazel run` of this to be useful, we need to be able
to ctrl-C it and have the container actually terminate. More information:
https://github.com/bazelbuild/bazel/issues/3519
Args:
layers: Augments "deps" with dependencies that should be put into
their own layers.
**kwargs: See java_library.
"""
library_name = name + ".library"
native.java_library(name = library_name, deps = deps + layers, **kwargs)
base = base or DEFAULT_JETTY_BASE
for index, dep in enumerate(layers):
this_name = "%s.%d" % (name, index)
_war_dep_layer(name = this_name, base = base, dep = dep)
base = this_name
visibility = kwargs.get("visibility", None)
tags = kwargs.get("tags", None)
_war_app_layer(
name = name,
base = base,
library = library_name,
jar_layers = layers,
visibility = visibility,
tags = tags,
)
| [] |
khushi-411/cupy | cupyx/jit/_builtin_funcs.py | b5221a478c800c5e60eef65545467de9eb00c0d9 | import warnings
import cupy
from cupy_backends.cuda.api import runtime
from cupy.cuda import device
from cupyx.jit import _cuda_types
from cupyx.jit._internal_types import BuiltinFunc
from cupyx.jit._internal_types import Data
from cupyx.jit._internal_types import Constant
from cupyx.jit._internal_types import Range
from cupyx.jit import _compile
from functools import reduce
class RangeFunc(BuiltinFunc):
def __call__(self, *args, unroll=None):
"""Range with loop unrolling support.
Args:
start (int):
Same as that of built-in :obj:`range`.
stop (int):
Same as that of built-in :obj:`range`.
step (int):
Same as that of built-in :obj:`range`.
unroll (int or bool or None):
- If `True`, add ``#pragma unroll`` directive before the
loop.
- If `False`, add ``#pragma unroll(1)`` directive before
the loop to disable unrolling.
- If an `int`, add ``#pragma unroll(n)`` directive before
the loop, where the integer ``n`` means the number of
iterations to unroll.
- If `None` (default), leave the control of loop unrolling
to the compiler (no ``#pragma``).
.. seealso:: `#pragma unroll`_
.. _#pragma unroll:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll
"""
super().__call__()
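# A hedged usage sketch of the `unroll` argument documented above; the kernel
# name, launch shape, and array sizes are illustrative assumptions only.
# Constant loop bounds are required whenever `unroll` is given:
#
#     import cupy
#     from cupyx import jit
#
#     @jit.rawkernel()
#     def copy16(x, y):
#         tid = jit.grid(1)
#         for i in range(16, unroll=True):   # constant bounds -> "#pragma unroll"
#             y[tid * 16 + i] = x[tid * 16 + i]
#
#     x = cupy.arange(1024, dtype=cupy.float32)
#     y = cupy.zeros_like(x)
#     copy16((1,), (64,), (x, y))            # 64 threads x 16 elements each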
def call(self, env, *args, unroll=None):
if len(args) == 0:
raise TypeError('range expected at least 1 argument, got 0')
elif len(args) == 1:
start, stop, step = Constant(0), args[0], Constant(1)
elif len(args) == 2:
start, stop, step = args[0], args[1], Constant(1)
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError(
f'range expected at most 3 argument, got {len(args)}')
if unroll is not None:
if not all(isinstance(x, Constant)
for x in (start, stop, step, unroll)):
raise TypeError(
'loop unrolling requires constant start, stop, step and '
'unroll value')
unroll = unroll.obj
if not (isinstance(unroll, int) or isinstance(unroll, bool)):
raise TypeError(
'unroll value expected to be of type int, '
f'got {type(unroll).__name__}')
if unroll is False:
unroll = 1
if not (unroll is True or 0 < unroll < 1 << 31):
warnings.warn(
'loop unrolling is ignored as the unroll value is '
'non-positive or greater than INT_MAX')
if isinstance(step, Constant):
step_is_positive = step.obj >= 0
elif step.ctype.dtype.kind == 'u':
step_is_positive = True
else:
step_is_positive = None
stop = Data.init(stop, env)
start = Data.init(start, env)
step = Data.init(step, env)
if start.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if stop.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if step.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if env.mode == 'numpy':
ctype = _cuda_types.Scalar(int)
elif env.mode == 'cuda':
ctype = stop.ctype
else:
assert False
return Range(start, stop, step, ctype, step_is_positive, unroll=unroll)
class LenFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) != 1:
raise TypeError(f'len() expects only 1 argument, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
arg = args[0]
if not isinstance(arg.ctype, _cuda_types.CArray):
raise TypeError('len() supports only array type')
if not arg.ctype.ndim:
raise TypeError('len() of unsized array')
return Data(f'static_cast<long long>({arg.code}.shape()[0])',
_cuda_types.Scalar('q'))
class MinFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) < 2:
raise TypeError(
f'min() expects at least 2 arguments, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
return reduce(lambda a, b: _compile._call_ufunc(
cupy.minimum, (a, b), None, env), args)
class MaxFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) < 2:
raise TypeError(
f'max() expects at least 2 arguments, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
return reduce(lambda a, b: _compile._call_ufunc(
cupy.maximum, (a, b), None, env), args)
class SyncThreads(BuiltinFunc):
def __call__(self):
"""Calls ``__syncthreads()``.
.. seealso:: `Synchronization functions`_
.. _Synchronization functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
"""
super().__call__()
def call_const(self, env):
return Data('__syncthreads()', _cuda_types.void)
class SyncWarp(BuiltinFunc):
def __call__(self, *, mask=0xffffffff):
"""Calls ``__syncwarp()``.
Args:
mask (int): Active threads in a warp. Default is 0xffffffff.
.. seealso:: `Synchronization functions`_
.. _Synchronization functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
"""
super().__call__()
def call(self, env, *, mask=None):
if runtime.is_hip:
if mask is not None:
warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
mask = None
if mask:
if isinstance(mask, Constant):
if not (0x0 <= mask.obj <= 0xffffffff):
raise ValueError('mask is out of range')
mask = _compile._astype_scalar(
mask, _cuda_types.int32, 'same_kind', env)
mask = Data.init(mask, env)
code = f'__syncwarp({mask.code})'
else:
code = '__syncwarp()'
return Data(code, _cuda_types.void)
class SharedMemory(BuiltinFunc):
def __call__(self, dtype, size, alignment=None):
"""Allocates shared memory and returns it as a 1-D array.
Args:
dtype (dtype):
The dtype of the returned array.
size (int or None):
If ``int`` type, the size of static shared memory.
If ``None``, declares the shared memory with extern specifier.
alignment (int or None): Enforce the alignment via __align__(N).
"""
super().__call__()
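# A hedged sketch of static shared memory inside a cupyx.jit kernel; the kernel
# name and launch configuration are illustrative assumptions only:
#
#     @jit.rawkernel()
#     def block_reverse(x, y):
#         smem = jit.shared_memory(cupy.float32, 64)    # 64 floats per block
#         tid = jit.threadIdx.x
#         smem[tid] = x[jit.blockIdx.x * 64 + tid]
#         jit.syncthreads()                             # wait for all writes
#         y[jit.blockIdx.x * 64 + tid] = smem[63 - tid]
#
#     x = cupy.arange(256, dtype=cupy.float32)
#     y = cupy.zeros_like(x)
#     block_reverse((4,), (64,), (x, y))                # 4 blocks of 64 threads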
def call_const(self, env, dtype, size, alignment=None):
name = env.get_fresh_variable_name(prefix='_smem')
child_type = _cuda_types.Scalar(dtype)
while env[name] is not None:
name = env.get_fresh_variable_name(prefix='_smem') # retry
var = Data(name, _cuda_types.SharedMem(child_type, size, alignment))
env.decls[name] = var
env.locals[name] = var
return Data(name, _cuda_types.Ptr(child_type))
class AtomicOp(BuiltinFunc):
def __init__(self, op, dtypes):
self._op = op
self._name = 'atomic' + op
self._dtypes = dtypes
doc = f"""Calls the ``{self._name}`` function to operate atomically on
``array[index]``. Please refer to `Atomic Functions`_ for detailed
explanation.
Args:
array: A :class:`cupy.ndarray` to index over.
index: A valid index such that the address to the corresponding
array element ``array[index]`` can be computed.
value: Represent the value to use for the specified operation. For
the case of :obj:`atomic_cas`, this is the value for
``array[index]`` to compare with.
alt_value: Only used in :obj:`atomic_cas` to represent the value
to swap to.
.. seealso:: `Numba's corresponding atomic functions`_
.. _Atomic Functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
.. _Numba's corresponding atomic functions:
https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations
"""
self.__doc__ = doc
def __call__(self, array, index, value, alt_value=None):
super().__call__()
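# A hedged sketch of `atomic_add` building a histogram; names, sizes and the
# launch shape are illustrative assumptions only:
#
#     @jit.rawkernel()
#     def histogram(x, bins):
#         tid = jit.grid(1)
#         if tid < len(x):
#             jit.atomic_add(bins, x[tid], 1)   # atomicAdd(&bins[x[tid]], 1)
#
#     x = cupy.random.randint(0, 10, 1000).astype(cupy.int32)
#     bins = cupy.zeros(10, dtype=cupy.int32)
#     histogram((8,), (128,), (x, bins))        # 1024 threads cover 1000 items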
def call(self, env, array, index, value, value2=None):
name = self._name
op = self._op
array = Data.init(array, env)
if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)):
raise TypeError('The first argument must be of array type.')
target = _compile._indexing(array, index, env)
ctype = target.ctype
if ctype.dtype.name not in self._dtypes:
raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
# On HIP, 'e' is not supported and we will never reach here
if (op == 'Add' and ctype.dtype.char == 'e'
and runtime.runtimeGetVersion() < 10000):
raise RuntimeError(
'float16 atomic operation is not supported before CUDA 10.0.')
value = _compile._astype_scalar(value, ctype, 'same_kind', env)
value = Data.init(value, env)
if op == 'CAS':
assert value2 is not None
# On HIP, 'H' is not supported and we will never reach here
if ctype.dtype.char == 'H':
if runtime.runtimeGetVersion() < 10010:
raise RuntimeError(
'uint16 atomic operation is not supported before '
'CUDA 10.1')
if int(device.get_compute_capability()) < 70:
raise RuntimeError(
'uint16 atomic operation is not supported before '
'sm_70')
value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env)
value2 = Data.init(value2, env)
code = f'{name}(&{target.code}, {value.code}, {value2.code})'
else:
assert value2 is None
code = f'{name}(&{target.code}, {value.code})'
return Data(code, ctype)
class GridFunc(BuiltinFunc):
def __init__(self, mode):
if mode == 'grid':
self._desc = 'Compute the thread index in the grid.'
self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x'
self._link = 'numba.cuda.grid'
self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}'
elif mode == 'gridsize':
self._desc = 'Compute the grid size.'
self._eq = 'jit.blockDim.x * jit.gridDim.x'
self._link = 'numba.cuda.gridsize'
self._code = 'blockDim.{n} * gridDim.{n}'
else:
raise ValueError('unsupported function')
doc = f""" {self._desc}
Computation of the first integer is as follows::
{self._eq}
and for the other two integers the ``y`` and ``z`` attributes are used.
Args:
ndim (int): The dimension of the grid. Only 1, 2, or 3 is allowed.
Returns:
int or tuple:
If ``ndim`` is 1, an integer is returned, otherwise a tuple.
.. note::
This function follows the convention of Numba's
:func:`{self._link}`.
"""
self.__doc__ = doc
def __call__(self, ndim):
super().__call__()
def call_const(self, env, ndim):
if not isinstance(ndim, int):
raise TypeError('ndim must be an integer')
# Numba convention: for 1D we return a single variable,
# otherwise a tuple
if ndim == 1:
return Data(self._code.format(n='x'), _cuda_types.uint32)
elif ndim == 2:
dims = ('x', 'y')
elif ndim == 3:
dims = ('x', 'y', 'z')
else:
raise ValueError('Only ndim=1,2,3 are supported')
elts_code = ', '.join(self._code.format(n=n) for n in dims)
ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim)
return Data(f'thrust::make_tuple({elts_code})', ctype)
class WarpShuffleOp(BuiltinFunc):
def __init__(self, op, dtypes):
self._op = op
self._name = '__shfl_' + (op + '_' if op else '') + 'sync'
self._dtypes = dtypes
doc = f"""Calls the ``{self._name}`` function. Please refer to
`Warp Shuffle Functions`_ for detailed explanation.
.. _Warp Shuffle Functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions
"""
self.__doc__ = doc
def __call__(self, mask, var, val_id, *, width=32):
super().__call__()
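# A hedged sketch of a warp-level sum with `shfl_down_sync`; the kernel name and
# the single-full-warp (32 thread) launch are illustrative assumptions only:
#
#     @jit.rawkernel()
#     def warp_sum(x, out):
#         value = x[jit.grid(1)]
#         value += jit.shfl_down_sync(0xffffffff, value, 16)
#         value += jit.shfl_down_sync(0xffffffff, value, 8)
#         value += jit.shfl_down_sync(0xffffffff, value, 4)
#         value += jit.shfl_down_sync(0xffffffff, value, 2)
#         value += jit.shfl_down_sync(0xffffffff, value, 1)
#         if jit.laneid() == 0:
#             out[0] = value                    # lane 0 now holds the warp total
#
#     x = cupy.arange(32, dtype=cupy.float32)
#     out = cupy.zeros(1, dtype=cupy.float32)
#     warp_sum((1,), (32,), (x, out))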
def call(self, env, mask, var, val_id, *, width=None):
name = self._name
var = Data.init(var, env)
ctype = var.ctype
if ctype.dtype.name not in self._dtypes:
raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
try:
mask = mask.obj
except Exception:
raise TypeError('mask must be an integer')
if runtime.is_hip:
warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
elif not (0x0 <= mask <= 0xffffffff):
raise ValueError('mask is out of range')
# val_id refers to "delta" for shfl_{up, down}, "srcLane" for shfl, and
# "laneMask" for shfl_xor
if self._op in ('up', 'down'):
val_id_t = _cuda_types.uint32
else:
val_id_t = _cuda_types.int32
val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env)
val_id = Data.init(val_id, env)
if width:
if isinstance(width, Constant):
if width.obj not in (2, 4, 8, 16, 32):
raise ValueError('width needs to be power of 2')
else:
width = Constant(64) if runtime.is_hip else Constant(32)
width = _compile._astype_scalar(
width, _cuda_types.int32, 'same_kind', env)
width = Data.init(width, env)
code = f'{name}({hex(mask)}, {var.code}, {val_id.code}'
code += f', {width.code})'
return Data(code, ctype)
class LaneID(BuiltinFunc):
def __call__(self):
"""Returns the lane ID of the calling thread, ranging in
``[0, jit.warpsize)``.
.. note::
Unlike :obj:`numba.cuda.laneid`, this is a callable function
instead of a property.
"""
super().__call__()
def _get_preamble(self):
preamble = '__device__ __forceinline__ unsigned int LaneId() {'
if not runtime.is_hip:
# see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419
preamble += """
unsigned int ret;
asm ("mov.u32 %0, %%laneid;" : "=r"(ret) );
return ret; }
"""
else:
# defined in hip/hcc_detail/device_functions.h
preamble += """
return __lane_id(); }
"""
return preamble
def call_const(self, env):
env.generated.add_code(self._get_preamble())
return Data('LaneId()', _cuda_types.uint32)
builtin_functions_dict = {
range: RangeFunc(),
len: LenFunc(),
min: MinFunc(),
max: MaxFunc(),
}
range_ = RangeFunc()
syncthreads = SyncThreads()
syncwarp = SyncWarp()
shared_memory = SharedMemory()
grid = GridFunc('grid')
gridsize = GridFunc('gridsize')
laneid = LaneID()
# atomic functions
atomic_add = AtomicOp(
'Add',
('int32', 'uint32', 'uint64', 'float32', 'float64')
+ (() if runtime.is_hip else ('float16',)))
atomic_sub = AtomicOp(
'Sub', ('int32', 'uint32'))
atomic_exch = AtomicOp(
'Exch', ('int32', 'uint32', 'uint64', 'float32'))
atomic_min = AtomicOp(
'Min', ('int32', 'uint32', 'uint64'))
atomic_max = AtomicOp(
'Max', ('int32', 'uint32', 'uint64'))
atomic_inc = AtomicOp(
'Inc', ('uint32',))
atomic_dec = AtomicOp(
'Dec', ('uint32',))
atomic_cas = AtomicOp(
'CAS',
('int32', 'uint32', 'uint64')
+ (() if runtime.is_hip else ('uint16',)))
atomic_and = AtomicOp(
'And', ('int32', 'uint32', 'uint64'))
atomic_or = AtomicOp(
'Or', ('int32', 'uint32', 'uint64'))
atomic_xor = AtomicOp(
'Xor', ('int32', 'uint32', 'uint64'))
# warp-shuffle functions
_shfl_dtypes = (
('int32', 'uint32', 'int64', 'float32', 'float64')
+ (() if runtime.is_hip else ('uint64', 'float16')))
shfl_sync = WarpShuffleOp('', _shfl_dtypes)
shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes)
shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes)
shfl_xor_sync = WarpShuffleOp('xor', _shfl_dtypes)
| [((86, 15, 86, 35), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(86, 25, 86, 29): 'stop', (86, 31, 86, 34): 'env'}, {}), '(stop, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((87, 16, 87, 37), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(87, 26, 87, 31): 'start', (87, 33, 87, 36): 'env'}, {}), '(start, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((88, 15, 88, 35), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(88, 25, 88, 29): 'step', (88, 31, 88, 34): 'env'}, {}), '(step, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((104, 15, 104, 79), 'cupyx.jit._internal_types.Range', 'Range', (), '', False, 'from cupyx.jit._internal_types import Range\n'), ((160, 15, 160, 56), 'cupyx.jit._internal_types.Data', 'Data', ({(160, 20, 160, 37): '"""__syncthreads()"""', (160, 39, 160, 55): '_cuda_types.void'}, {}), "('__syncthreads()', _cuda_types.void)", False, 'from cupyx.jit._internal_types import Data\n'), ((194, 15, 194, 43), 'cupyx.jit._internal_types.Data', 'Data', ({(194, 20, 194, 24): 'code', (194, 26, 194, 42): '_cuda_types.void'}, {}), '(code, _cuda_types.void)', False, 'from cupyx.jit._internal_types import Data\n'), ((214, 21, 214, 46), 'cupyx.jit._cuda_types.Scalar', '_cuda_types.Scalar', ({(214, 40, 214, 45): 'dtype'}, {}), '(dtype)', False, 'from cupyx.jit import _cuda_types\n'), ((259, 16, 259, 37), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(259, 26, 259, 31): 'array', (259, 33, 259, 36): 'env'}, {}), '(array, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((262, 17, 262, 54), 'cupyx.jit._compile._indexing', '_compile._indexing', ({(262, 36, 262, 41): 'array', (262, 43, 262, 48): 'index', (262, 50, 262, 53): 'env'}, {}), '(array, index, env)', False, 'from cupyx.jit import _compile\n'), ((271, 16, 271, 71), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', ({(271, 40, 271, 45): 'value', (271, 47, 271, 52): 'ctype', (271, 54, 271, 65): '"""same_kind"""', (271, 67, 271, 70): 'env'}, {}), "(value, ctype, 'same_kind', env)", False, 'from cupyx.jit import _compile\n'), ((272, 16, 272, 37), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(272, 26, 272, 31): 'value', (272, 33, 272, 36): 'env'}, {}), '(value, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((291, 15, 291, 32), 'cupyx.jit._internal_types.Data', 'Data', ({(291, 20, 291, 24): 'code', (291, 26, 291, 31): 'ctype'}, {}), '(code, ctype)', False, 'from cupyx.jit._internal_types import Data\n'), ((350, 16, 350, 60), 'cupyx.jit._cuda_types.Tuple', '_cuda_types.Tuple', ({(350, 34, 350, 59): '[_cuda_types.uint32] * ndim'}, {}), '([_cuda_types.uint32] * ndim)', False, 'from cupyx.jit import _cuda_types\n'), ((351, 15, 351, 62), 'cupyx.jit._internal_types.Data', 'Data', ({(351, 20, 351, 54): 'f"""thrust::make_tuple({elts_code})"""', (351, 56, 351, 61): 'ctype'}, {}), "(f'thrust::make_tuple({elts_code})', ctype)", False, 'from cupyx.jit._internal_types import Data\n'), ((374, 14, 374, 33), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(374, 24, 374, 27): 'var', (374, 29, 374, 32): 'env'}, {}), '(var, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((394, 17, 394, 76), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', ({(394, 41, 394, 47): 'val_id', (394, 49, 394, 57): 'val_id_t', (394, 59, 394, 70): '"""same_kind"""', (394, 72, 394, 75): 'env'}, {}), "(val_id, val_id_t, 'same_kind', env)", False, 'from cupyx.jit import _compile\n'), ((395, 17, 395, 39), 
'cupyx.jit._internal_types.Data.init', 'Data.init', ({(395, 27, 395, 33): 'val_id', (395, 35, 395, 38): 'env'}, {}), '(val_id, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((403, 16, 404, 55), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', ({(404, 12, 404, 17): 'width', (404, 19, 404, 36): '_cuda_types.int32', (404, 38, 404, 49): '"""same_kind"""', (404, 51, 404, 54): 'env'}, {}), "(width, _cuda_types.int32, 'same_kind', env)", False, 'from cupyx.jit import _compile\n'), ((405, 16, 405, 37), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(405, 26, 405, 31): 'width', (405, 33, 405, 36): 'env'}, {}), '(width, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((409, 15, 409, 32), 'cupyx.jit._internal_types.Data', 'Data', ({(409, 20, 409, 24): 'code', (409, 26, 409, 31): 'ctype'}, {}), '(code, ctype)', False, 'from cupyx.jit._internal_types import Data\n'), ((441, 15, 441, 51), 'cupyx.jit._internal_types.Data', 'Data', ({(441, 20, 441, 30): '"""LaneId()"""', (441, 32, 441, 50): '_cuda_types.uint32'}, {}), "('LaneId()', _cuda_types.uint32)", False, 'from cupyx.jit._internal_types import Data\n'), ((98, 20, 98, 43), 'cupyx.jit._cuda_types.Scalar', '_cuda_types.Scalar', ({(98, 39, 98, 42): 'int'}, {}), '(int)', False, 'from cupyx.jit import _cuda_types\n'), ((120, 20, 120, 43), 'cupyx.jit._cuda_types.Scalar', '_cuda_types.Scalar', ({(120, 39, 120, 42): '"""q"""'}, {}), "('q')", False, 'from cupyx.jit import _cuda_types\n'), ((188, 19, 189, 58), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', ({(189, 16, 189, 20): 'mask', (189, 22, 189, 39): '_cuda_types.int32', (189, 41, 189, 52): '"""same_kind"""', (189, 54, 189, 57): 'env'}, {}), "(mask, _cuda_types.int32, 'same_kind', env)", False, 'from cupyx.jit import _compile\n'), ((190, 19, 190, 39), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(190, 29, 190, 33): 'mask', (190, 35, 190, 38): 'env'}, {}), '(mask, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((217, 25, 217, 75), 'cupyx.jit._cuda_types.SharedMem', '_cuda_types.SharedMem', ({(217, 47, 217, 57): 'child_type', (217, 59, 217, 63): 'size', (217, 65, 217, 74): 'alignment'}, {}), '(child_type, size, alignment)', False, 'from cupyx.jit import _cuda_types\n'), ((220, 26, 220, 53), 'cupyx.jit._cuda_types.Ptr', '_cuda_types.Ptr', ({(220, 42, 220, 52): 'child_type'}, {}), '(child_type)', False, 'from cupyx.jit import _cuda_types\n'), ((285, 21, 285, 77), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', ({(285, 45, 285, 51): 'value2', (285, 53, 285, 58): 'ctype', (285, 60, 285, 71): '"""same_kind"""', (285, 73, 285, 76): 'env'}, {}), "(value2, ctype, 'same_kind', env)", False, 'from cupyx.jit import _compile\n'), ((286, 21, 286, 43), 'cupyx.jit._internal_types.Data.init', 'Data.init', ({(286, 31, 286, 37): 'value2', (286, 39, 286, 42): 'env'}, {}), '(value2, env)', False, 'from cupyx.jit._internal_types import Data\n'), ((384, 12, 384, 75), 'warnings.warn', 'warnings.warn', ({(384, 26, 384, 58): 'f"""mask {mask} is ignored on HIP"""', (384, 60, 384, 74): 'RuntimeWarning'}, {}), "(f'mask {mask} is ignored on HIP', RuntimeWarning)", False, 'import warnings\n'), ((75, 16, 77, 59), 'warnings.warn', 'warnings.warn', ({(76, 20, 77, 58): '"""loop unrolling is ignored as the unroll value is non-positive or greater than INT_MAX"""'}, {}), "(\n 'loop unrolling is ignored as the unroll value is non-positive or greater than INT_MAX'\n )", False, 'import warnings\n'), ((131, 35, 132, 44), 
'cupyx.jit._compile._call_ufunc', '_compile._call_ufunc', ({(132, 12, 132, 24): 'cupy.minimum', (132, 26, 132, 32): '(a, b)', (132, 34, 132, 38): 'None', (132, 40, 132, 43): 'env'}, {}), '(cupy.minimum, (a, b), None, env)', False, 'from cupyx.jit import _compile\n'), ((143, 35, 144, 44), 'cupyx.jit._compile._call_ufunc', '_compile._call_ufunc', ({(144, 12, 144, 24): 'cupy.maximum', (144, 26, 144, 32): '(a, b)', (144, 34, 144, 38): 'None', (144, 40, 144, 43): 'env'}, {}), '(cupy.maximum, (a, b), None, env)', False, 'from cupyx.jit import _compile\n'), ((181, 16, 181, 79), 'warnings.warn', 'warnings.warn', ({(181, 30, 181, 62): 'f"""mask {mask} is ignored on HIP"""', (181, 64, 181, 78): 'RuntimeWarning'}, {}), "(f'mask {mask} is ignored on HIP', RuntimeWarning)", False, 'import warnings\n'), ((268, 20, 268, 47), 'cupy_backends.cuda.api.runtime.runtimeGetVersion', 'runtime.runtimeGetVersion', ({}, {}), '()', False, 'from cupy_backends.cuda.api import runtime\n'), ((402, 20, 402, 32), 'cupyx.jit._internal_types.Constant', 'Constant', ({(402, 29, 402, 31): '(64)'}, {}), '(64)', False, 'from cupyx.jit._internal_types import Constant\n'), ((402, 56, 402, 68), 'cupyx.jit._internal_types.Constant', 'Constant', ({(402, 65, 402, 67): '(32)'}, {}), '(32)', False, 'from cupyx.jit._internal_types import Constant\n'), ((52, 32, 52, 43), 'cupyx.jit._internal_types.Constant', 'Constant', ({(52, 41, 52, 42): '(0)'}, {}), '(0)', False, 'from cupyx.jit._internal_types import Constant\n'), ((52, 54, 52, 65), 'cupyx.jit._internal_types.Constant', 'Constant', ({(52, 63, 52, 64): '(1)'}, {}), '(1)', False, 'from cupyx.jit._internal_types import Constant\n'), ((277, 19, 277, 46), 'cupy_backends.cuda.api.runtime.runtimeGetVersion', 'runtime.runtimeGetVersion', ({}, {}), '()', False, 'from cupy_backends.cuda.api import runtime\n'), ((54, 50, 54, 61), 'cupyx.jit._internal_types.Constant', 'Constant', ({(54, 59, 54, 60): '(1)'}, {}), '(1)', False, 'from cupyx.jit._internal_types import Constant\n'), ((281, 23, 281, 54), 'cupy.cuda.device.get_compute_capability', 'device.get_compute_capability', ({}, {}), '()', False, 'from cupy.cuda import device\n')] |
jinrunheng/base-of-python | python-basic-grammer/python-basic/02-python-variables-and-string/string_strip_demo.py | 595bdbc8bfaf2136d8f1f9ea82c03b84aeaf0a39 | # 字符串删除空白
str1 = " hello "
print(str1)
print(len(str1))
# Strip spaces from both ends
print(str1.strip())
print(len(str1.strip()))
# Strip spaces from the left side
print(str1.lstrip())
print(len(str1.lstrip()))
# Strip spaces from the right side
print(str1.rstrip())
print(len(str1.rstrip())) | [] |
hyperiongeo/bruges | bruges/util/__init__.py | 6d9a3aae86aaa53107caaa20e9aafa390358b0f8 | # -*- coding: utf-8 -*-
from .util import rms
from .util import moving_average
from .util import moving_avg_conv
from .util import moving_avg_fft
from .util import normalize
from .util import next_pow2
from .util import top_and_tail
from .util import extrapolate
from .util import nearest
from .util import deprecated
from .util import apply_along_axis
from .util import sigmoid
| [] |
CrankySupertoon01/Toontown-2 | toontown/estate/DistributedHouseDoor.py | 60893d104528a8e7eb4aced5d0015f22e203466d | from toontown.toonbase.ToonBaseGlobal import *
from panda3d.core import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.distributed import DistributedObject
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.MessengerGlobal import messenger
from direct.fsm import ClassicFSM
from toontown.building import DistributedDoor
from toontown.hood import ZoneUtil
from toontown.suit import Suit
from toontown.building import FADoorCodes
from toontown.building import DoorTypes
from toontown.estate.DistributedHouse import DistributedHouse
class DistributedHouseDoor(DistributedDoor.DistributedDoor):
def __init__(self, cr):
DistributedDoor.DistributedDoor.__init__(self, cr)
def disable(self):
DistributedDoor.DistributedDoor.disable(self)
self.ignoreAll()
def setZoneIdAndBlock(self, zoneId, block):
self.houseId = block
DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block)
def getTriggerName(self):
return 'door_trigger_' + str(self.houseId)
def hideDoorParts(self):
try:
self.findDoorNode('doorFrameHoleRight').hide()
self.findDoorNode('doorFrameHoleLeft').hide()
except:
pass
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
if self.doorType == DoorTypes.EXT_STANDARD:
house = base.cr.doId2do.get(self.houseId)
if not isinstance(house, DistributedHouse):
self.notify.error('tried to use {0} as house'.format(house.__class__.__name__))
if house and house.house_loaded:
self.__gotRelatedHouse()
else:
self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse)
elif self.doorType == DoorTypes.INT_STANDARD:
door = render.find('**/leftDoor;+s')
if door.isEmpty():
self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse)
else:
self.__gotRelatedHouse()
def __gotRelatedHouse(self):
self.doPostAnnounceGenerate()
self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty()
self.hideDoorParts()
building = self.getBuilding()
doorTrigger = building.find('**/door_trigger*')
doorTrigger.setName(self.getTriggerName())
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
self.acceptOnce('clearOutToonInterior', self.doorTrigger)
self.zoneDoneLoading = 0
def getBuilding(self, allowEmpty = False):
if 'building' not in self.__dict__:
if self.doorType == DoorTypes.INT_STANDARD:
door = render.find('**/leftDoor;+s')
self.building = door.getParent()
elif self.doorType == DoorTypes.EXT_STANDARD:
if self.houseId:
self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None)
if allowEmpty:
return self.building
return self.building
def isInterior(self):
if self.doorType == DoorTypes.INT_STANDARD:
return 1
return 0
def getDoorNodePath(self):
if self.doorType == DoorTypes.INT_STANDARD:
otherNP = render.find('**/door_origin')
elif self.doorType == DoorTypes.EXT_STANDARD:
building = self.getBuilding()
otherNP = building.find('**/door')
if otherNP.isEmpty():
otherNP = building.find('**/door_origin')
else:
self.notify.error('No such door type as ' + str(self.doorType))
return otherNP
def enterClosing(self, ts):
doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
if doorFrameHoleRight.isEmpty():
self.notify.warning('enterClosing(): did not find doorFrameHoleRight')
return
rightDoor = self.findDoorNode('rightDoor')
if rightDoor.isEmpty():
self.notify.warning('enterClosing(): did not find rightDoor')
return
otherNP = self.getDoorNodePath()
trackName = 'doorClose-%d' % self.doId
if self.rightSwing:
h = 100
else:
h = -100
self.finishDoorTrack()
self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName)
self.doorTrack.start(ts)
if hasattr(self, 'done'):
base.cr.playGame.hood.loader.setHouse(self.houseId)
zoneId = self.otherZoneId
if self.doorType == DoorTypes.EXT_STANDARD:
whereTo = 'house'
else:
whereTo = 'estate'
request = {'loader': 'safeZoneLoader',
'where': whereTo,
'how': 'doorIn',
'hoodId': ToontownGlobals.MyEstate,
'zoneId': zoneId,
'shardId': None,
'avId': -1,
'allowRedirect': 0,
'doorDoId': self.otherDoId}
messenger.send('doorDoneEvent', [request])
return
| [((20, 8, 20, 58), 'toontown.building.DistributedDoor.DistributedDoor.__init__', 'DistributedDoor.DistributedDoor.__init__', ({(20, 49, 20, 53): 'self', (20, 55, 20, 57): 'cr'}, {}), '(self, cr)', False, 'from toontown.building import DistributedDoor\n'), ((23, 8, 23, 53), 'toontown.building.DistributedDoor.DistributedDoor.disable', 'DistributedDoor.DistributedDoor.disable', ({(23, 48, 23, 52): 'self'}, {}), '(self)', False, 'from toontown.building import DistributedDoor\n'), ((28, 8, 28, 78), 'toontown.building.DistributedDoor.DistributedDoor.setZoneIdAndBlock', 'DistributedDoor.DistributedDoor.setZoneIdAndBlock', ({(28, 58, 28, 62): 'self', (28, 64, 28, 70): 'zoneId', (28, 72, 28, 77): 'block'}, {}), '(self, zoneId, block)', False, 'from toontown.building import DistributedDoor\n'), ((41, 8, 41, 66), 'direct.distributed.DistributedObject.DistributedObject.announceGenerate', 'DistributedObject.DistributedObject.announceGenerate', ({(41, 61, 41, 65): 'self'}, {}), '(self)', False, 'from direct.distributed import DistributedObject\n'), ((133, 12, 133, 54), 'direct.showbase.MessengerGlobal.messenger.send', 'messenger.send', ({(133, 27, 133, 42): '"""doorDoneEvent"""', (133, 44, 133, 53): '[request]'}, {}), "('doorDoneEvent', [request])", False, 'from direct.showbase.MessengerGlobal import messenger\n')] |
AGhaderi/spatial_attenNCM | Neuro-Cognitive Models/Runs/Nonhier_run/res_nonhier.py | 1f7edf17f55d804d2ae3360d23623c9ab5035518 | #!/home/a.ghaderi/.conda/envs/envjm/bin/python
# Model 2
import pystan
import pandas as pd
import numpy as np
import sys
sys.path.append('../../')
import utils
parts = 1
data = utils.get_data() #loading dataset
data = data[data['participant']==parts]
mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat
obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observed data for n200lat
N_mis = mis.shape[0] # number of missing data
N_obs = obs.shape[0] # number of observed data
modelfile = '../../stans/res_nonhier.stan' #reading the Stan model
f = open(modelfile, 'r')
model_wiener = f.read()
sm = pystan.StanModel(model_code=model_wiener) # compile the Stan model
ncohers = 2 #Number of coherence conditions
nspats = 2 #Number of spatial conditions
nconds = 4 #Number of conditions
y = data['y'].to_numpy()
cond_coher = data['cond_coher'].to_numpy()
cond_spat = data['cond_spat'].to_numpy()
conds = data['conds'].to_numpy()
n200lat = data['n200lat'].to_numpy()
#set initial data for the Stan model
data_winner = {'N_obs':N_obs, #Number of trial-level observations
'N_mis':N_mis, #Number of trial-level mising data
'ncohers':ncohers, #Number of coherence conditions
'nspats':nspats, #Number of spatial conditions
'nconds':nconds, #Number of conditions
'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for obervation and missing data
'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial
'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #spatial index for each trial
'conds':np.concatenate([conds[obs],conds[mis]]), #condition index for each trial
'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation
# setting MCMC arguments
niter = 10000
nwarmup = 4000
nchains = 1
thin = 1
initials = [] # initial values for sampling
for c in range(0, nchains):
chaininit = {
'delta': np.random.uniform(1, 3, size=ncohers),
'alpha': np.random.uniform(.5, 1.),
'eta': np.random.uniform(.01, .2),
'res': np.random.uniform(.01, .02, size=nspats),
'n200sub': np.random.uniform(.11, .2, size=nconds),
'lambda': np.random.uniform(.01, .02),
'n200lat_mis': np.random.uniform(.11, .2, size = N_mis)
}
initials.append(chaininit)
# Train the model and generate samples
fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)
utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/'+str(parts)+'_res_nonhier.pkl')
| [((7, 0, 7, 25), 'sys.path.append', 'sys.path.append', ({(7, 16, 7, 24): '"""../../"""'}, {}), "('../../')", False, 'import sys\n'), ((11, 7, 11, 23), 'utils.get_data', 'utils.get_data', ({}, {}), '()', False, 'import utils\n'), ((23, 5, 23, 46), 'pystan.StanModel', 'pystan.StanModel', (), '', False, 'import pystan\n'), ((14, 6, 14, 61), 'numpy.where', 'np.where', ({(14, 15, 14, 60): "((data['n200lat'] < 0.101) | (data['n200lat'] > 0.248))"}, {}), "((data['n200lat'] < 0.101) | (data['n200lat'] > 0.248))", True, 'import numpy as np\n'), ((15, 6, 15, 61), 'numpy.where', 'np.where', ({(15, 15, 15, 60): "((data['n200lat'] > 0.101) & (data['n200lat'] < 0.248))"}, {}), "((data['n200lat'] > 0.101) & (data['n200lat'] < 0.248))", True, 'import numpy as np\n'), ((40, 20, 40, 51), 'numpy.concatenate', 'np.concatenate', ({(40, 35, 40, 50): '[y[obs], y[mis]]'}, {}), '([y[obs], y[mis]])', True, 'import numpy as np\n'), ((41, 29, 41, 78), 'numpy.concatenate', 'np.concatenate', ({(41, 44, 41, 77): '[cond_coher[obs], cond_coher[mis]]'}, {}), '([cond_coher[obs], cond_coher[mis]])', True, 'import numpy as np\n'), ((42, 28, 42, 75), 'numpy.concatenate', 'np.concatenate', ({(42, 43, 42, 74): '[cond_spat[obs], cond_spat[mis]]'}, {}), '([cond_spat[obs], cond_spat[mis]])', True, 'import numpy as np\n'), ((43, 24, 43, 63), 'numpy.concatenate', 'np.concatenate', ({(43, 39, 43, 62): '[conds[obs], conds[mis]]'}, {}), '([conds[obs], conds[mis]])', True, 'import numpy as np\n'), ((55, 17, 55, 54), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((56, 17, 56, 42), 'numpy.random.uniform', 'np.random.uniform', ({(56, 35, 56, 37): '(0.5)', (56, 39, 56, 41): '(1.0)'}, {}), '(0.5, 1.0)', True, 'import numpy as np\n'), ((57, 15, 57, 41), 'numpy.random.uniform', 'np.random.uniform', ({(57, 33, 57, 36): '(0.01)', (57, 38, 57, 40): '(0.2)'}, {}), '(0.01, 0.2)', True, 'import numpy as np\n'), ((58, 15, 58, 55), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((59, 19, 59, 58), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((60, 18, 60, 45), 'numpy.random.uniform', 'np.random.uniform', ({(60, 36, 60, 39): '(0.01)', (60, 41, 60, 44): '(0.02)'}, {}), '(0.01, 0.02)', True, 'import numpy as np\n'), ((61, 23, 61, 63), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n')] |
refgenomics/rq-dashboard | rq_dashboard/dashboard.py | cdfadd2b9aa9a66b0594fd5573e3c45fa8643f05 | from redis import Redis
from redis import from_url
from rq import push_connection, pop_connection
from rq.job import Job
from functools import wraps
import times
from flask import Blueprint
from flask import current_app, url_for, abort
from flask import render_template
from rq import Queue, Worker
from rq import cancel_job, requeue_job
from rq import get_failed_queue
from math import ceil
dashboard = Blueprint('rq_dashboard', __name__,
template_folder='templates',
static_folder='static',
)
@dashboard.before_request
def authentication_hook():
""" Allow the parent app to authenticate user's access to the dashboard
with it's own auth_handler method that must return True or False
"""
auth_handler = current_app.extensions['rq-dashboard'].auth_handler
if auth_handler and not auth_handler():
abort(401)
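# Illustrative sketch (not part of the original module): a parent app could plug in its
# own handler by exposing an object with an `auth_handler` attribute under
# app.extensions['rq-dashboard']; the exact extension object used in real deployments
# may differ, so treat this as an assumption.
#
#     def only_localhost():
#         from flask import request
#         return request.remote_addr == '127.0.0.1'
#     app.extensions['rq-dashboard'].auth_handler = only_localhost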
@dashboard.before_app_first_request
def setup_rq_connection():
if current_app.config.get('REDIS_URL'):
current_app.redis_conn = from_url(current_app.config.get('REDIS_URL'))
else:
current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'),
port=current_app.config.get('REDIS_PORT', 6379),
password=current_app.config.get('REDIS_PASSWORD', None),
db=current_app.config.get('REDIS_DB', 0))
@dashboard.before_request
def push_rq_connection():
push_connection(current_app.redis_conn)
@dashboard.teardown_request
def pop_rq_connection(exception=None):
pop_connection()
def jsonify(f):
@wraps(f)
def _wrapped(*args, **kwargs):
from flask import jsonify as flask_jsonify
try:
result_dict = f(*args, **kwargs)
except Exception as e:
result_dict = dict(status='error')
if current_app.config['DEBUG']:
result_dict['reason'] = str(e)
from traceback import format_exc
result_dict['exc_info'] = format_exc()
return flask_jsonify(**result_dict)
return _wrapped
def serialize_queues(queues):
return [dict(name=q.name, count=q.count, url=url_for('.overview',
queue_name=q.name)) for q in queues]
def serialize_date(dt):
if dt is None:
return None
return times.format(dt, 'UTC')
def serialize_job(job):
return dict(
id=job.id,
created_at=serialize_date(job.created_at),
enqueued_at=serialize_date(job.enqueued_at),
ended_at=serialize_date(job.ended_at),
origin=job.origin,
result=job._result,
exc_info=job.exc_info,
description=job.description)
def remove_none_values(input_dict):
return dict([ (k,v) for k,v in input_dict.items() if v is not None ])
def pagination_window(total_items, cur_page, per_page=5, window_size=10):
all_pages = range(1, int(ceil(total_items / float(per_page))) + 1)
    result = all_pages  # fall back to the full page range when no window is requested
if (window_size >= 1):
pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size / 2.0))))
pages_window_end = int(pages_window_start + window_size)
result = all_pages[pages_window_start:pages_window_end]
return result
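# Worked examples (added for illustration, not in the original file): with the default
# per_page=5 and window_size=10, pagination_window(23, 1) covers pages 1-5 because only
# ceil(23/5)=5 pages exist, while pagination_window(100, 10) covers pages 5-14, a
# 10-page window roughly centred on the current page.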
@dashboard.route('/', defaults={'queue_name': None, 'page': '1'})
@dashboard.route('/<queue_name>', defaults={'page': '1'})
@dashboard.route('/<queue_name>/<page>')
def overview(queue_name, page):
if queue_name is None:
# Show the failed queue by default if it contains any jobs
failed = Queue('failed')
if not failed.is_empty():
queue = failed
else:
queue = Queue()
else:
queue = Queue(queue_name)
return render_template('rq_dashboard/dashboard.html',
workers=Worker.all(),
queue=queue,
page=page,
queues=Queue.all(),
rq_url_prefix=url_for('.overview'))
@dashboard.route('/job/<job_id>/cancel', methods=['POST'])
@jsonify
def cancel_job_view(job_id):
rq_job = Job.fetch(job_id)
if rq_job.status == "failed":
rq_job.delete()
else:
rq_job.cancel()
return dict(status='OK')
@dashboard.route('/job/<job_id>/requeue', methods=['POST'])
@jsonify
def requeue_job_view(job_id):
requeue_job(job_id)
return dict(status='OK')
@dashboard.route('/requeue-all', methods=['GET', 'POST'])
@jsonify
def requeue_all():
fq = get_failed_queue()
job_ids = fq.job_ids
count = len(job_ids)
for job_id in job_ids:
requeue_job(job_id)
return dict(status='OK', count=count)
@dashboard.route('/queue/<queue_name>/empty', methods=['POST'])
@jsonify
def empty_queue(queue_name):
q = Queue(queue_name)
q.empty()
return dict(status='OK')
@dashboard.route('/queue/<queue_name>/compact', methods=['POST'])
@jsonify
def compact_queue(queue_name):
q = Queue(queue_name)
q.compact()
return dict(status='OK')
@dashboard.route('/queues.json')
@jsonify
def list_queues():
queues = serialize_queues(sorted(Queue.all()))
return dict(queues=queues)
@dashboard.route('/jobs/<queue_name>/<page>.json')
@jsonify
def list_jobs(queue_name, page):
current_page = int(page)
queue = Queue(queue_name)
per_page = 5
total_items = queue.count
pages_numbers_in_window = pagination_window(total_items, current_page, per_page)
pages_in_window = [ dict(number=p, url=url_for('.overview',
queue_name=queue_name, page=p)) for p in pages_numbers_in_window ]
last_page = int(ceil(total_items / float(per_page)))
prev_page = None
if current_page > 1:
prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1)))
next_page = None
if current_page < last_page:
next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1)))
pagination = remove_none_values(
dict(pages_in_window=pages_in_window,
next_page=next_page,
prev_page=prev_page))
offset = (current_page - 1) * per_page
jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)]
return dict(name=queue.name, jobs=jobs, pagination=pagination)
@dashboard.route('/workers.json')
@jsonify
def list_workers():
def serialize_queue_names(worker):
return [q.name for q in worker.queues]
workers = [dict(name=worker.name, queues=serialize_queue_names(worker),
state=worker.get_state()) for worker in Worker.all()]
return dict(workers=workers)
@dashboard.context_processor
def inject_interval():
interval = current_app.config.get('RQ_POLL_INTERVAL', 2500)
return dict(poll_interval=interval)
| [((16, 12, 19, 9), 'flask.Blueprint', 'Blueprint', (), '', False, 'from flask import Blueprint\n'), ((34, 7, 34, 42), 'flask.current_app.config.get', 'current_app.config.get', ({(34, 30, 34, 41): '"""REDIS_URL"""'}, {}), "('REDIS_URL')", False, 'from flask import current_app, url_for, abort\n'), ((45, 4, 45, 43), 'rq.push_connection', 'push_connection', ({(45, 20, 45, 42): 'current_app.redis_conn'}, {}), '(current_app.redis_conn)', False, 'from rq import push_connection, pop_connection\n'), ((50, 4, 50, 20), 'rq.pop_connection', 'pop_connection', ({}, {}), '()', False, 'from rq import push_connection, pop_connection\n'), ((54, 5, 54, 13), 'functools.wraps', 'wraps', ({(54, 11, 54, 12): 'f'}, {}), '(f)', False, 'from functools import wraps\n'), ((77, 11, 77, 34), 'times.format', 'times.format', ({(77, 24, 77, 26): 'dt', (77, 28, 77, 33): '"""UTC"""'}, {}), "(dt, 'UTC')", False, 'import times\n'), ((131, 13, 131, 30), 'rq.job.Job.fetch', 'Job.fetch', ({(131, 23, 131, 29): 'job_id'}, {}), '(job_id)', False, 'from rq.job import Job\n'), ((144, 4, 144, 23), 'rq.requeue_job', 'requeue_job', ({(144, 16, 144, 22): 'job_id'}, {}), '(job_id)', False, 'from rq import cancel_job, requeue_job\n'), ((151, 9, 151, 27), 'rq.get_failed_queue', 'get_failed_queue', ({}, {}), '()', False, 'from rq import get_failed_queue\n'), ((162, 8, 162, 25), 'rq.Queue', 'Queue', ({(162, 14, 162, 24): 'queue_name'}, {}), '(queue_name)', False, 'from rq import Queue, Worker\n'), ((170, 8, 170, 25), 'rq.Queue', 'Queue', ({(170, 14, 170, 24): 'queue_name'}, {}), '(queue_name)', False, 'from rq import Queue, Worker\n'), ((186, 12, 186, 29), 'rq.Queue', 'Queue', ({(186, 18, 186, 28): 'queue_name'}, {}), '(queue_name)', False, 'from rq import Queue, Worker\n'), ((224, 15, 224, 63), 'flask.current_app.config.get', 'current_app.config.get', ({(224, 38, 224, 56): '"""RQ_POLL_INTERVAL"""', (224, 58, 224, 62): '2500'}, {}), "('RQ_POLL_INTERVAL', 2500)", False, 'from flask import current_app, url_for, abort\n'), ((29, 8, 29, 18), 'flask.abort', 'abort', ({(29, 14, 29, 17): '(401)'}, {}), '(401)', False, 'from flask import current_app, url_for, abort\n'), ((65, 15, 65, 43), 'flask.jsonify', 'flask_jsonify', ({}, {}), '(**result_dict)', True, 'from flask import jsonify as flask_jsonify\n'), ((112, 17, 112, 32), 'rq.Queue', 'Queue', ({(112, 23, 112, 31): '"""failed"""'}, {}), "('failed')", False, 'from rq import Queue, Worker\n'), ((118, 16, 118, 33), 'rq.Queue', 'Queue', ({(118, 22, 118, 32): 'queue_name'}, {}), '(queue_name)', False, 'from rq import Queue, Worker\n'), ((155, 8, 155, 27), 'rq.requeue_job', 'requeue_job', ({(155, 20, 155, 26): 'job_id'}, {}), '(job_id)', False, 'from rq import cancel_job, requeue_job\n'), ((35, 42, 35, 77), 'flask.current_app.config.get', 'current_app.config.get', ({(35, 65, 35, 76): '"""REDIS_URL"""'}, {}), "('REDIS_URL')", False, 'from flask import current_app, url_for, abort\n'), ((116, 20, 116, 27), 'rq.Queue', 'Queue', ({}, {}), '()', False, 'from rq import Queue, Worker\n'), ((121, 20, 121, 32), 'rq.Worker.all', 'Worker.all', ({}, {}), '()', False, 'from rq import Queue, Worker\n'), ((124, 19, 124, 30), 'rq.Queue.all', 'Queue.all', ({}, {}), '()', False, 'from rq import Queue, Worker\n'), ((125, 26, 125, 46), 'flask.url_for', 'url_for', ({(125, 34, 125, 45): '""".overview"""'}, {}), "('.overview')", False, 'from flask import current_app, url_for, abort\n'), ((178, 37, 178, 48), 'rq.Queue.all', 'Queue.all', ({}, {}), '()', False, 'from rq import Queue, Worker\n'), ((219, 48, 219, 60), 
'rq.Worker.all', 'Worker.all', ({}, {}), '()', False, 'from rq import Queue, Worker\n'), ((37, 44, 37, 93), 'flask.current_app.config.get', 'current_app.config.get', ({(37, 67, 37, 79): '"""REDIS_HOST"""', (37, 81, 37, 92): '"""localhost"""'}, {}), "('REDIS_HOST', 'localhost')", False, 'from flask import current_app, url_for, abort\n'), ((38, 28, 38, 70), 'flask.current_app.config.get', 'current_app.config.get', ({(38, 51, 38, 63): '"""REDIS_PORT"""', (38, 65, 38, 69): '6379'}, {}), "('REDIS_PORT', 6379)", False, 'from flask import current_app, url_for, abort\n'), ((39, 32, 39, 78), 'flask.current_app.config.get', 'current_app.config.get', ({(39, 55, 39, 71): '"""REDIS_PASSWORD"""', (39, 73, 39, 77): 'None'}, {}), "('REDIS_PASSWORD', None)", False, 'from flask import current_app, url_for, abort\n'), ((40, 26, 40, 63), 'flask.current_app.config.get', 'current_app.config.get', ({(40, 49, 40, 59): '"""REDIS_DB"""', (40, 61, 40, 62): '0'}, {}), "('REDIS_DB', 0)", False, 'from flask import current_app, url_for, abort\n'), ((70, 49, 71, 26), 'flask.url_for', 'url_for', (), '', False, 'from flask import current_app, url_for, abort\n'), ((190, 43, 191, 38), 'flask.url_for', 'url_for', (), '', False, 'from flask import current_app, url_for, abort\n'), ((196, 29, 196, 95), 'flask.url_for', 'url_for', (), '', False, 'from flask import current_app, url_for, abort\n'), ((200, 29, 200, 95), 'flask.url_for', 'url_for', (), '', False, 'from flask import current_app, url_for, abort\n'), ((64, 42, 64, 54), 'traceback.format_exc', 'format_exc', ({}, {}), '()', False, 'from traceback import format_exc\n'), ((100, 89, 100, 112), 'math.ceil', 'ceil', ({(100, 94, 100, 111): 'window_size / 2.0'}, {}), '(window_size / 2.0)', False, 'from math import ceil\n')] |
moas/mfdata | layers/layer1_python3/0300_acquisition/acquisition/__init__.py | ca9460c3783ddfd6ad022c96a0a8bf0e65fa36b2 | from acquisition.step import AcquisitionStep
from acquisition.stats import AcquisitionStatsDClient
from acquisition.move_step import AcquisitionMoveStep
from acquisition.delete_step import AcquisitionDeleteStep
from acquisition.batch_step import AcquisitionBatchStep
from acquisition.reinject_step import AcquisitionReinjectStep
from acquisition.fork_step import AcquisitionForkStep
from acquisition.archive_step import AcquisitionArchiveStep
from acquisition.listener import AcquisitionListener
__all__ = ['AcquisitionStep', 'AcquisitionBatchStep',
'AcquisitionMoveStep', 'AcquisitionDeleteStep',
'AcquisitionReinjectStep', 'AcquisitionForkStep',
'AcquisitionArchiveStep', 'AcquisitionStatsDClient',
'AcquisitionListener']
| [] |
Semicheche/foa_frappe_docker | frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py | a186b65d5e807dd4caf049e8aeb3620a799c1225 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
class Member(Document):
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self)
def validate(self):
self.validate_email_type(self.email)
def validate_email_type(self, email):
from frappe.utils import validate_email_add
validate_email_add(email.strip(), True) | [((13, 2, 13, 32), 'frappe.contacts.address_and_contact.load_address_and_contact', 'load_address_and_contact', ({(13, 27, 13, 31): 'self'}, {}), '(self)', False, 'from frappe.contacts.address_and_contact import load_address_and_contact\n')] |
ayyuriss/TRHPO | networks/networks.py | 56a06d3593504647b75589ab87b5c96bdab74c9f | from torch import nn
import numpy as np
import base.basenetwork as BaseN
from networks.cholesky import CholeskyBlock
class FCNet(BaseN.BaseNetwork):
name ="FCNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.Softplus(),
nn.Linear(1024,512),nn.Tanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class FCSpectralNet(BaseN.BaseNetwork):
name ="FCSpectralNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,512),BaseN.AdaptiveTanh(),
BaseN.EigenLayer(512,self.output_shape[0]))
self.compile()
class FCSpectralMNet(BaseN.BaseNetwork):
name ="FCSpectralMNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.ReLU(),
nn.Linear(1024,1024),nn.ReLU(),
nn.Linear(1024,512),nn.ReLU(),
nn.Linear(512,self.output_shape[0]-1),nn.Tanh(),
BaseN.EigenLayer())
self.compile()
class FCNetQ(BaseN.BaseNetwork):
name ="FCNetQ"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNetQ,self).__init__(input_shape,output_shape,owner_name)
x = int(np.prod(input_shape))
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(x,x),nn.Tanh(),
nn.Linear(x,self.output_shape[0]))
self.compile()
class ConvNet(BaseN.BaseNetwork):
name="ConvNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNet,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(),
BaseN.conv3_2(8, 16),nn.ReLU(),
BaseN.conv3_2(8, 8))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class ConvNetBias(BaseN.BaseNetwork):
name="ConvNetBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(),
BaseN.conv3_2(12, 16),
BaseN.conv3_2(16, 20))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class FCConvNet(BaseN.BaseNetwork):
name="FCConvNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCConvNet,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),
BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class FCConvNetBias(BaseN.BaseNetwork):
name="FCConvNetBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(),
BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class ConvNet2(BaseN.BaseNetwork):
name="ConvNet2"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNet2,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(),
BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
nn.Linear(512,1024),nn.Tanh(),
nn.Linear(1024,512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,256),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class ConvNetBig(BaseN.BaseNetwork):
name="ConvNetBig"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),nn.Softplus(),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
BaseN.EigenLayer(512,self.output_shape[0]))
self.compile()
class ConvNetBigBias(BaseN.BaseNetwork):
name="ConvNetBigBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),
BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
BaseN.EigenLayer(512,self.output_shape[0],bias=False))
self.compile()
class ConvNetBigAtari(BaseN.BaseNetwork):
name="ConvNetBigAtari"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,512),nn.Tanh(),
nn.Linear(512,1024),
BaseN.EigenLayer(1024,self.output_shape[0]))
self.compile()
class ConvNetBigS(BaseN.BaseNetwork):
name="ConvNetBigS"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
nn.Linear(512,self.output_shape[0]))
self.compile()
class ConvNetMNIST(BaseN.BaseNetwork):
name = "ConvNetMNIST"
def __init__(self,input_shape,output_shape,**kwargs):
super(ConvNetMNIST,self).__init__(**kwargs)
self.n = output_shape
self.conv = [BaseN.ResNetBlock(1,32),
BaseN.conv3_2(32,64)]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0], nn.Softplus(),
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class ConvNetSimple(BaseN.BaseNetwork):
name="ConvNetSimple"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,self.output_shape[0]))
self.compile()
class FCNetSimple(BaseN.BaseNetwork):
name ="FCNetSimple"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.Softplus(),
nn.Linear(1024,512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,self.output_shape[0]))
self.compile()
| [((64, 12, 64, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(64, 31, 64, 43): 'self.conv[0]', (64, 44, 64, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((80, 12, 80, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(80, 31, 80, 43): 'self.conv[0]', (80, 44, 80, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((95, 12, 95, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(95, 31, 95, 43): 'self.conv[0]', (95, 44, 95, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((111, 12, 111, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(111, 31, 111, 43): 'self.conv[0]', (111, 44, 111, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((127, 12, 127, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(127, 31, 127, 43): 'self.conv[0]', (127, 44, 127, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((147, 12, 147, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(147, 31, 147, 43): 'self.conv[0]', (147, 44, 147, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((163, 12, 163, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(163, 31, 163, 43): 'self.conv[0]', (163, 44, 163, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((179, 12, 179, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(179, 31, 179, 43): 'self.conv[0]', (179, 44, 179, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((196, 12, 196, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(196, 31, 196, 43): 'self.conv[0]', (196, 44, 196, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((212, 12, 212, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(212, 31, 212, 43): 'self.conv[0]', (212, 44, 212, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((226, 12, 226, 56), 'base.basenetwork.output_shape', 'BaseN.output_shape', ({(226, 31, 226, 43): 'self.conv[0]', (226, 44, 226, 55): 'input_shape'}, {}), '(self.conv[0], input_shape)', True, 'import base.basenetwork as BaseN\n'), ((12, 35, 12, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((13, 63, 13, 76), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((14, 35, 14, 54), 'torch.nn.Linear', 'nn.Linear', ({(14, 45, 14, 49): '1024', (14, 50, 14, 53): '512'}, {}), '(1024, 512)', False, 'from torch import nn\n'), ((14, 55, 14, 64), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((15, 35, 15, 53), 'torch.nn.Linear', 'nn.Linear', ({(15, 45, 15, 48): '512', (15, 49, 15, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((16, 35, 16, 77), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', ({(16, 52, 16, 55): '256', (16, 56, 16, 76): 'self.output_shape[0]'}, {}), '(256, self.output_shape[0])', True, 'import base.basenetwork as BaseN\n'), ((25, 35, 25, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((26, 63, 26, 83), 
'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((27, 35, 27, 55), 'torch.nn.Linear', 'nn.Linear', ({(27, 45, 27, 49): '1024', (27, 50, 27, 54): '1024'}, {}), '(1024, 1024)', False, 'from torch import nn\n'), ((27, 56, 27, 76), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((28, 35, 28, 54), 'torch.nn.Linear', 'nn.Linear', ({(28, 45, 28, 49): '1024', (28, 50, 28, 53): '512'}, {}), '(1024, 512)', False, 'from torch import nn\n'), ((28, 55, 28, 75), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((29, 35, 29, 77), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', ({(29, 52, 29, 55): '512', (29, 56, 29, 76): 'self.output_shape[0]'}, {}), '(512, self.output_shape[0])', True, 'import base.basenetwork as BaseN\n'), ((37, 35, 37, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((38, 63, 38, 72), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', False, 'from torch import nn\n'), ((39, 35, 39, 55), 'torch.nn.Linear', 'nn.Linear', ({(39, 45, 39, 49): '1024', (39, 50, 39, 54): '1024'}, {}), '(1024, 1024)', False, 'from torch import nn\n'), ((39, 56, 39, 65), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', False, 'from torch import nn\n'), ((40, 35, 40, 54), 'torch.nn.Linear', 'nn.Linear', ({(40, 45, 40, 49): '1024', (40, 50, 40, 53): '512'}, {}), '(1024, 512)', False, 'from torch import nn\n'), ((40, 55, 40, 64), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', False, 'from torch import nn\n'), ((41, 35, 41, 72), 'torch.nn.Linear', 'nn.Linear', ({(41, 45, 41, 48): '512', (41, 49, 41, 71): 'self.output_shape[0] - 1'}, {}), '(512, self.output_shape[0] - 1)', False, 'from torch import nn\n'), ((41, 73, 41, 82), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((42, 35, 42, 53), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((49, 16, 49, 36), 'numpy.prod', 'np.prod', ({(49, 24, 49, 35): 'input_shape'}, {}), '(input_shape)', True, 'import numpy as np\n'), ((50, 35, 50, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((51, 35, 51, 49), 'torch.nn.Linear', 'nn.Linear', ({(51, 45, 51, 46): 'x', (51, 47, 51, 48): 'x'}, {}), '(x, x)', False, 'from torch import nn\n'), ((51, 50, 51, 59), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((52, 35, 52, 68), 'torch.nn.Linear', 'nn.Linear', ({(52, 45, 52, 46): 'x', (52, 47, 52, 67): 'self.output_shape[0]'}, {}), '(x, self.output_shape[0])', False, 'from torch import nn\n'), ((66, 35, 66, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((67, 62, 67, 82), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((68, 35, 68, 53), 'torch.nn.Linear', 'nn.Linear', ({(68, 45, 68, 48): '512', (68, 49, 68, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((69, 35, 69, 88), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', (), '', True, 'import base.basenetwork as BaseN\n'), ((82, 35, 82, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((83, 62, 83, 82), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import 
base.basenetwork as BaseN\n'), ((84, 35, 84, 53), 'torch.nn.Linear', 'nn.Linear', ({(84, 45, 84, 48): '512', (84, 49, 84, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((85, 35, 85, 88), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', (), '', True, 'import base.basenetwork as BaseN\n'), ((97, 35, 97, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((99, 35, 99, 53), 'torch.nn.Linear', 'nn.Linear', ({(99, 45, 99, 48): '512', (99, 49, 99, 52): '512'}, {}), '(512, 512)', False, 'from torch import nn\n'), ((99, 54, 99, 74), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((100, 35, 100, 53), 'torch.nn.Linear', 'nn.Linear', ({(100, 45, 100, 48): '512', (100, 49, 100, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((101, 35, 101, 88), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', (), '', True, 'import base.basenetwork as BaseN\n'), ((113, 35, 113, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((115, 35, 115, 54), 'torch.nn.Linear', 'nn.Linear', ({(115, 45, 115, 48): '512', (115, 49, 115, 53): '1024'}, {}), '(512, 1024)', False, 'from torch import nn\n'), ((115, 55, 115, 75), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((116, 35, 116, 54), 'torch.nn.Linear', 'nn.Linear', ({(116, 45, 116, 49): '1024', (116, 50, 116, 53): '256'}, {}), '(1024, 256)', False, 'from torch import nn\n'), ((117, 35, 117, 88), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', (), '', True, 'import base.basenetwork as BaseN\n'), ((129, 35, 129, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((131, 35, 131, 53), 'torch.nn.Linear', 'nn.Linear', ({(131, 45, 131, 48): '512', (131, 49, 131, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((131, 54, 131, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((132, 35, 132, 53), 'torch.nn.Linear', 'nn.Linear', ({(132, 45, 132, 48): '256', (132, 49, 132, 52): '512'}, {}), '(256, 512)', False, 'from torch import nn\n'), ((133, 35, 133, 54), 'torch.nn.Linear', 'nn.Linear', ({(133, 45, 133, 48): '512', (133, 49, 133, 53): '1024'}, {}), '(512, 1024)', False, 'from torch import nn\n'), ((133, 55, 133, 64), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((134, 35, 134, 54), 'torch.nn.Linear', 'nn.Linear', ({(134, 45, 134, 49): '1024', (134, 50, 134, 53): '512'}, {}), '(1024, 512)', False, 'from torch import nn\n'), ((135, 35, 135, 53), 'torch.nn.Linear', 'nn.Linear', ({(135, 45, 135, 48): '512', (135, 49, 135, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((135, 54, 135, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((136, 35, 136, 53), 'torch.nn.Linear', 'nn.Linear', ({(136, 45, 136, 48): '256', (136, 49, 136, 52): '256'}, {}), '(256, 256)', False, 'from torch import nn\n'), ((137, 35, 137, 77), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', ({(137, 52, 137, 55): '256', (137, 56, 137, 76): 'self.output_shape[0]'}, {}), '(256, self.output_shape[0])', True, 'import base.basenetwork as BaseN\n'), ((149, 35, 149, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((151, 35, 151, 53), 'torch.nn.Linear', 'nn.Linear', 
({(151, 45, 151, 48): '512', (151, 49, 151, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((151, 54, 151, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((152, 35, 152, 53), 'torch.nn.Linear', 'nn.Linear', ({(152, 45, 152, 48): '256', (152, 49, 152, 52): '512'}, {}), '(256, 512)', False, 'from torch import nn\n'), ((153, 35, 153, 77), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', ({(153, 52, 153, 55): '512', (153, 56, 153, 76): 'self.output_shape[0]'}, {}), '(512, self.output_shape[0])', True, 'import base.basenetwork as BaseN\n'), ((165, 35, 165, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((167, 35, 167, 53), 'torch.nn.Linear', 'nn.Linear', ({(167, 45, 167, 48): '512', (167, 49, 167, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((167, 54, 167, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((168, 35, 168, 53), 'torch.nn.Linear', 'nn.Linear', ({(168, 45, 168, 48): '256', (168, 49, 168, 52): '512'}, {}), '(256, 512)', False, 'from torch import nn\n'), ((169, 35, 169, 88), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', (), '', True, 'import base.basenetwork as BaseN\n'), ((181, 35, 181, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((183, 35, 183, 53), 'torch.nn.Linear', 'nn.Linear', ({(183, 45, 183, 48): '512', (183, 49, 183, 52): '512'}, {}), '(512, 512)', False, 'from torch import nn\n'), ((183, 54, 183, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((184, 35, 184, 54), 'torch.nn.Linear', 'nn.Linear', ({(184, 45, 184, 48): '512', (184, 49, 184, 53): '1024'}, {}), '(512, 1024)', False, 'from torch import nn\n'), ((185, 35, 185, 78), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', ({(185, 52, 185, 56): '1024', (185, 57, 185, 77): 'self.output_shape[0]'}, {}), '(1024, self.output_shape[0])', True, 'import base.basenetwork as BaseN\n'), ((198, 35, 198, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((200, 35, 200, 53), 'torch.nn.Linear', 'nn.Linear', ({(200, 45, 200, 48): '512', (200, 49, 200, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((200, 54, 200, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((201, 35, 201, 53), 'torch.nn.Linear', 'nn.Linear', ({(201, 45, 201, 48): '256', (201, 49, 201, 52): '512'}, {}), '(256, 512)', False, 'from torch import nn\n'), ((202, 35, 202, 70), 'torch.nn.Linear', 'nn.Linear', ({(202, 45, 202, 48): '512', (202, 49, 202, 69): 'self.output_shape[0]'}, {}), '(512, self.output_shape[0])', False, 'from torch import nn\n'), ((210, 21, 210, 44), 'base.basenetwork.ResNetBlock', 'BaseN.ResNetBlock', ({(210, 39, 210, 40): '(1)', (210, 41, 210, 43): '(32)'}, {}), '(1, 32)', True, 'import base.basenetwork as BaseN\n'), ((211, 25, 211, 45), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(211, 39, 211, 41): '(32)', (211, 42, 211, 44): '(64)'}, {}), '(32, 64)', True, 'import base.basenetwork as BaseN\n'), ((213, 49, 213, 62), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((214, 36, 214, 51), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((216, 36, 216, 54), 'torch.nn.Linear', 'nn.Linear', ({(216, 46, 216, 49): '512', (216, 50, 216, 53): '256'}, {}), '(512, 256)', 
False, 'from torch import nn\n'), ((216, 55, 216, 64), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((217, 36, 217, 78), 'base.basenetwork.EigenLayer', 'BaseN.EigenLayer', ({(217, 53, 217, 56): '256', (217, 57, 217, 77): 'self.output_shape[0]'}, {}), '(256, self.output_shape[0])', True, 'import base.basenetwork as BaseN\n'), ((228, 35, 228, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((230, 35, 230, 53), 'torch.nn.Linear', 'nn.Linear', ({(230, 45, 230, 48): '512', (230, 49, 230, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((230, 54, 230, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((231, 35, 231, 70), 'torch.nn.Linear', 'nn.Linear', ({(231, 45, 231, 48): '256', (231, 49, 231, 69): 'self.output_shape[0]'}, {}), '(256, self.output_shape[0])', False, 'from torch import nn\n'), ((240, 35, 240, 50), 'base.basenetwork.Flatten', 'BaseN.Flatten', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((241, 63, 241, 76), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((242, 35, 242, 54), 'torch.nn.Linear', 'nn.Linear', ({(242, 45, 242, 49): '1024', (242, 50, 242, 53): '512'}, {}), '(1024, 512)', False, 'from torch import nn\n'), ((243, 35, 243, 53), 'torch.nn.Linear', 'nn.Linear', ({(243, 45, 243, 48): '512', (243, 49, 243, 52): '256'}, {}), '(512, 256)', False, 'from torch import nn\n'), ((243, 54, 243, 63), 'torch.nn.Tanh', 'nn.Tanh', ({}, {}), '()', False, 'from torch import nn\n'), ((244, 35, 244, 70), 'torch.nn.Linear', 'nn.Linear', ({(244, 45, 244, 48): '256', (244, 49, 244, 69): 'self.output_shape[0]'}, {}), '(256, self.output_shape[0])', False, 'from torch import nn\n'), ((13, 45, 13, 55), 'numpy.prod', 'np.prod', ({(13, 53, 13, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((26, 45, 26, 55), 'numpy.prod', 'np.prod', ({(26, 53, 26, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((38, 45, 38, 55), 'numpy.prod', 'np.prod', ({(38, 53, 38, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((61, 35, 61, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(61, 49, 61, 63): 'input_shape[0]', (61, 65, 61, 66): '(8)'}, {}), '(input_shape[0], 8)', True, 'import base.basenetwork as BaseN\n'), ((61, 68, 61, 77), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', False, 'from torch import nn\n'), ((62, 49, 62, 69), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(62, 63, 62, 64): '(8)', (62, 66, 62, 68): '(16)'}, {}), '(8, 16)', True, 'import base.basenetwork as BaseN\n'), ((62, 70, 62, 79), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', False, 'from torch import nn\n'), ((63, 49, 63, 68), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(63, 63, 63, 64): '(8)', (63, 66, 63, 67): '(8)'}, {}), '(8, 8)', True, 'import base.basenetwork as BaseN\n'), ((67, 45, 67, 55), 'numpy.prod', 'np.prod', ({(67, 53, 67, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((76, 35, 76, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(76, 49, 76, 63): 'input_shape[0]', (76, 65, 76, 66): '(8)'}, {}), '(input_shape[0], 8)', True, 'import base.basenetwork as BaseN\n'), ((76, 68, 76, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((77, 35, 77, 55), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(77, 49, 77, 50): '(8)', (77, 52, 77, 54): '(12)'}, {}), '(8, 12)', True, 'import base.basenetwork as BaseN\n'), ((77, 56, 77, 76), 'base.basenetwork.AdaptiveTanh', 
'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((78, 35, 78, 56), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(78, 49, 78, 51): '(12)', (78, 53, 78, 55): '(16)'}, {}), '(12, 16)', True, 'import base.basenetwork as BaseN\n'), ((79, 35, 79, 56), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(79, 49, 79, 51): '(16)', (79, 53, 79, 55): '(20)'}, {}), '(16, 20)', True, 'import base.basenetwork as BaseN\n'), ((83, 45, 83, 55), 'numpy.prod', 'np.prod', ({(83, 53, 83, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((93, 35, 93, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(93, 49, 93, 63): 'input_shape[0]', (93, 65, 93, 66): '(4)'}, {}), '(input_shape[0], 4)', True, 'import base.basenetwork as BaseN\n'), ((93, 68, 93, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((94, 35, 94, 54), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(94, 49, 94, 50): '(4)', (94, 52, 94, 53): '(8)'}, {}), '(4, 8)', True, 'import base.basenetwork as BaseN\n'), ((94, 55, 94, 75), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((98, 45, 98, 55), 'numpy.prod', 'np.prod', ({(98, 53, 98, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((109, 35, 109, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(109, 49, 109, 63): 'input_shape[0]', (109, 65, 109, 66): '(4)'}, {}), '(input_shape[0], 4)', True, 'import base.basenetwork as BaseN\n'), ((109, 68, 109, 77), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', False, 'from torch import nn\n'), ((110, 35, 110, 54), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(110, 49, 110, 50): '(4)', (110, 52, 110, 53): '(4)'}, {}), '(4, 4)', True, 'import base.basenetwork as BaseN\n'), ((110, 55, 110, 75), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((114, 45, 114, 55), 'numpy.prod', 'np.prod', ({(114, 53, 114, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((125, 35, 125, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(125, 49, 125, 63): 'input_shape[0]', (125, 65, 125, 66): '(3)'}, {}), '(input_shape[0], 3)', True, 'import base.basenetwork as BaseN\n'), ((125, 68, 125, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((126, 49, 126, 68), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(126, 63, 126, 64): '(3)', (126, 66, 126, 67): '(6)'}, {}), '(3, 6)', True, 'import base.basenetwork as BaseN\n'), ((126, 69, 126, 89), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(126, 83, 126, 84): '(6)', (126, 86, 126, 88): '(12)'}, {}), '(6, 12)', True, 'import base.basenetwork as BaseN\n'), ((130, 45, 130, 55), 'numpy.prod', 'np.prod', ({(130, 53, 130, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((144, 35, 144, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(144, 49, 144, 63): 'input_shape[0]', (144, 65, 144, 66): '(8)'}, {}), '(input_shape[0], 8)', True, 'import base.basenetwork as BaseN\n'), ((144, 68, 144, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((145, 49, 145, 69), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(145, 63, 145, 64): '(8)', (145, 66, 145, 68): '(16)'}, {}), '(8, 16)', True, 'import base.basenetwork as BaseN\n'), ((145, 70, 145, 83), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((146, 49, 146, 70), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(146, 63, 146, 65): '(16)', (146, 67, 146, 69): 
'(32)'}, {}), '(16, 32)', True, 'import base.basenetwork as BaseN\n'), ((150, 45, 150, 55), 'numpy.prod', 'np.prod', ({(150, 53, 150, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((161, 35, 161, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(161, 49, 161, 63): 'input_shape[0]', (161, 65, 161, 66): '(4)'}, {}), '(input_shape[0], 4)', True, 'import base.basenetwork as BaseN\n'), ((161, 68, 161, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((162, 49, 162, 68), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(162, 63, 162, 64): '(4)', (162, 66, 162, 67): '(4)'}, {}), '(4, 4)', True, 'import base.basenetwork as BaseN\n'), ((162, 69, 162, 89), 'base.basenetwork.AdaptiveTanh', 'BaseN.AdaptiveTanh', ({}, {}), '()', True, 'import base.basenetwork as BaseN\n'), ((166, 45, 166, 55), 'numpy.prod', 'np.prod', ({(166, 53, 166, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((176, 35, 176, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(176, 49, 176, 63): 'input_shape[0]', (176, 65, 176, 66): '(8)'}, {}), '(input_shape[0], 8)', True, 'import base.basenetwork as BaseN\n'), ((176, 68, 176, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((177, 49, 177, 69), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(177, 63, 177, 64): '(8)', (177, 66, 177, 68): '(16)'}, {}), '(8, 16)', True, 'import base.basenetwork as BaseN\n'), ((178, 49, 178, 70), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(178, 63, 178, 65): '(16)', (178, 67, 178, 69): '(32)'}, {}), '(16, 32)', True, 'import base.basenetwork as BaseN\n'), ((182, 45, 182, 55), 'numpy.prod', 'np.prod', ({(182, 53, 182, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((193, 35, 193, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(193, 49, 193, 63): 'input_shape[0]', (193, 65, 193, 66): '(8)'}, {}), '(input_shape[0], 8)', True, 'import base.basenetwork as BaseN\n'), ((193, 68, 193, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((194, 49, 194, 69), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(194, 63, 194, 64): '(8)', (194, 66, 194, 68): '(16)'}, {}), '(8, 16)', True, 'import base.basenetwork as BaseN\n'), ((195, 49, 195, 70), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(195, 63, 195, 65): '(16)', (195, 67, 195, 69): '(32)'}, {}), '(16, 32)', True, 'import base.basenetwork as BaseN\n'), ((199, 45, 199, 55), 'numpy.prod', 'np.prod', ({(199, 53, 199, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((215, 46, 215, 56), 'numpy.prod', 'np.prod', ({(215, 54, 215, 55): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((225, 35, 225, 67), 'base.basenetwork.conv3_2', 'BaseN.conv3_2', ({(225, 49, 225, 63): 'input_shape[0]', (225, 65, 225, 66): '(4)'}, {}), '(input_shape[0], 4)', True, 'import base.basenetwork as BaseN\n'), ((225, 68, 225, 81), 'torch.nn.Softplus', 'nn.Softplus', ({}, {}), '()', False, 'from torch import nn\n'), ((229, 45, 229, 55), 'numpy.prod', 'np.prod', ({(229, 53, 229, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((241, 45, 241, 55), 'numpy.prod', 'np.prod', ({(241, 53, 241, 54): 'x'}, {}), '(x)', True, 'import numpy as np\n')] |
sverrirab/icenews | icenews/api_important_words.py | 10a5e13d4dcd5e95f746c4fec9821b4b48fa440e | import logging
from pydantic import BaseModel, Field
from typing import List
from .similar import important_words
from .server import app
_MAX_LENGTH = 2000
logger = logging.getLogger(__name__)
class ImportantWordsResponse(BaseModel):
important_words: List[str] = Field(..., description="List of lemmas")
class ImportantWordsRequest(BaseModel):
input_string: str = Field(
...,
description="Icelandic text for analysis.",
min_length=1,
max_length=_MAX_LENGTH,
)
# Strange things happen with error handling when using alias - splitting up into two input models
class ParseInputDeprecated(BaseModel):
input_string: str = Field(
...,
description="Icelandic text for analysis.",
min_length=1,
max_length=_MAX_LENGTH,
alias="in",
)
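# Example payloads (illustrative addition, not part of the original module): the current
# model expects {"input_string": "<icelandic text>"}, while ParseInputDeprecated accepts
# the aliased key instead, i.e. {"in": "<icelandic text>"}.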
@app.post(
"/v1/important_words",
description="Find lemmas of important words",
response_model=ImportantWordsResponse,
)
def v1_important_words(*, data: ImportantWordsRequest):
return ImportantWordsResponse(important_words=important_words(data.input_string))
@app.post(
"/v1/parse",
description="Find lemmas of important words",
response_model=ImportantWordsResponse,
deprecated=True,
)
def v1_parse(*, data: ParseInputDeprecated):
logger.info(f"parse: {repr(data.input_string)}")
return ImportantWordsResponse(important_words=important_words(data.input_string))
| [((11, 9, 11, 36), 'logging.getLogger', 'logging.getLogger', ({(11, 27, 11, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((15, 33, 15, 73), 'pydantic.Field', 'Field', (), '', False, 'from pydantic import BaseModel, Field\n'), ((19, 24, 24, 5), 'pydantic.Field', 'Field', (), '', False, 'from pydantic import BaseModel, Field\n'), ((29, 24, 35, 5), 'pydantic.Field', 'Field', (), '', False, 'from pydantic import BaseModel, Field\n')] |
kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial- | try-except.py | 8ea4ef004d86fdf393980fd356edcf5b769bfeac |
try:
# num = 10 / 0
number = int(input("Enter a number: "))
print(number)
# catch specific errors
except ZeroDivisionError as err:
print(err)
except ValueError:
print("Invalid input")
| [] |
Gattocrucco/sipmfilter | peaksampl.py | 74215d6c53b998808fc6c677b46030234d996bdf | import numpy as np
def _adddims(a, b):
n = max(a.ndim, b.ndim)
a = np.expand_dims(a, tuple(range(n - a.ndim)))
b = np.expand_dims(b, tuple(range(n - b.ndim)))
return a, b
def _yz(y, z, t, yout):
"""
Shared implementation of peaksampl and sumpeaks.
"""
y = np.asarray(y)
z = np.asarray(z)
t = np.asarray(t)
y = np.pad(y, [(0, 0)] * (y.ndim - 1) + [(1, 1)], constant_values=yout)
offset = np.argmax(np.abs(y), axis=-1)
ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1)
ampl = np.squeeze(ampl, -1)
indices = t[..., :, None] - t[..., None, :] + offset[..., None, None]
indices = np.minimum(indices, y.shape[-1] - 1)
indices = np.maximum(indices, 0)
N = t.shape[-1]
indices = indices.reshape(indices.shape[:-2] + (N * N,))
n = max(y.ndim, indices.ndim)
y, indices = _adddims(y, indices)
y = np.take_along_axis(y, indices, -1)
eps = np.finfo(float).eps * N * N * ampl
y[..., ::N + 1] += np.expand_dims(eps, -1)
y = y.reshape(y.shape[:-1] + (N, N))
z = z[..., None]
y, z = _adddims(y, z)
return y, z
def peaksampl(y, z, t, yout=0):
"""
Get peak amplitudes given their sum.
    This assumes that the position of the signals is given by the peak positions
even when they are summed.
Parameters
----------
y : array (..., M,)
The single signal shape.
z : array (..., N,)
The peak height in the sum of the signals for each peak.
t : int array (..., N,)
The indices of the peaks in the sum.
yout : scalar
The value of the signal outside the provided values, default 0.
Return
------
a : array (..., N),
The amplitudes such that z_i = sum_j a_j * y[t_i - t_j].
Broadcasted along non-last axis.
"""
y, z = _yz(y, z, t, yout)
a = np.linalg.solve(y, z)
return np.squeeze(a, -1)
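# Added note (not part of the original file): peaksampl solves the square linear system
# G a = z with G[i, j] = y[t_i - t_j] sampled around the template peak (plus the tiny
# diagonal term added in _yz to keep G invertible), so for well separated peaks G is
# close to max(|y|) * I and a is roughly z / max(|y|).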
def sumpeaks(y, a, t, yout=0):
"""
Compute the peak heights of a sum of signals.
This assumes that the position of the peaks is given by the signal
positions even when they are summed.
Parameters
----------
y : array (..., M,)
The single signal shape.
a : array (..., N,)
The amplitudes of the signals (`y` is multiplied by `a`).
t : int array (..., N,)
The indices of the position of the signals.
yout : scalar
The value of the signal outside the provided values, default 0.
Return
------
z : array (..., N,)
The peak height in the sum of the signals for each signal. Broadcasted
along non-last axis.
"""
y, a = _yz(y, a, t, yout)
z = np.matmul(y, a)
return np.squeeze(z, axis=-1)
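# Added note (not part of the original file): sumpeaks is the forward counterpart of
# peaksampl above - it multiplies the same matrix G by the amplitudes instead of
# solving the system, so sumpeaks(y, peaksampl(y, z, t), t) recovers z up to floating
# point error.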
if __name__ == '__main__':
from matplotlib import pyplot as plt
from scipy import signal
y = np.exp(-np.linspace(0, 10, 1000) / 10)
i = np.arange(1, 1000)
t0 = np.array([10, 340, 523])
a0 = np.array([3, 2, 1])
indices = i - t0[:, None]
z = np.take(y, indices, mode='clip') * a0[:, None]
z = np.where((indices < 0) | (indices >= len(y)), 0, z)
z = np.sum(z, axis=0)
t, = signal.argrelmax(z)
assert len(t) == len(t0)
a = peaksampl(y, z[t], t)
h = sumpeaks(y, a, t)
fig, ax = plt.subplots(num='peaksampl', clear=True)
ax.plot(z, color='#f55')
ax.vlines(t0, 0, a0, color='gray', zorder=3)
ax.vlines(t, 0, a, linestyle='--', zorder=3)
ax.plot(t, h, 'ok')
ax.grid('major', linestyle='--')
fig.tight_layout()
fig.show()
| [((13, 8, 13, 21), 'numpy.asarray', 'np.asarray', ({(13, 19, 13, 20): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((14, 8, 14, 21), 'numpy.asarray', 'np.asarray', ({(14, 19, 14, 20): 'z'}, {}), '(z)', True, 'import numpy as np\n'), ((15, 8, 15, 21), 'numpy.asarray', 'np.asarray', ({(15, 19, 15, 20): 't'}, {}), '(t)', True, 'import numpy as np\n'), ((17, 8, 17, 75), 'numpy.pad', 'np.pad', (), '', True, 'import numpy as np\n'), ((20, 11, 20, 31), 'numpy.squeeze', 'np.squeeze', ({(20, 22, 20, 26): 'ampl', (20, 28, 20, 30): '-1'}, {}), '(ampl, -1)', True, 'import numpy as np\n'), ((23, 14, 23, 50), 'numpy.minimum', 'np.minimum', ({(23, 25, 23, 32): 'indices', (23, 34, 23, 49): 'y.shape[-1] - 1'}, {}), '(indices, y.shape[-1] - 1)', True, 'import numpy as np\n'), ((24, 14, 24, 36), 'numpy.maximum', 'np.maximum', ({(24, 25, 24, 32): 'indices', (24, 34, 24, 35): '0'}, {}), '(indices, 0)', True, 'import numpy as np\n'), ((30, 8, 30, 42), 'numpy.take_along_axis', 'np.take_along_axis', ({(30, 27, 30, 28): 'y', (30, 30, 30, 37): 'indices', (30, 39, 30, 41): '-1'}, {}), '(y, indices, -1)', True, 'import numpy as np\n'), ((32, 23, 32, 46), 'numpy.expand_dims', 'np.expand_dims', ({(32, 38, 32, 41): 'eps', (32, 43, 32, 45): '(-1)'}, {}), '(eps, -1)', True, 'import numpy as np\n'), ((65, 8, 65, 29), 'numpy.linalg.solve', 'np.linalg.solve', ({(65, 24, 65, 25): 'y', (65, 27, 65, 28): 'z'}, {}), '(y, z)', True, 'import numpy as np\n'), ((66, 11, 66, 28), 'numpy.squeeze', 'np.squeeze', ({(66, 22, 66, 23): 'a', (66, 25, 66, 27): '(-1)'}, {}), '(a, -1)', True, 'import numpy as np\n'), ((93, 8, 93, 23), 'numpy.matmul', 'np.matmul', ({(93, 18, 93, 19): 'y', (93, 21, 93, 22): 'a'}, {}), '(y, a)', True, 'import numpy as np\n'), ((94, 11, 94, 33), 'numpy.squeeze', 'np.squeeze', (), '', True, 'import numpy as np\n'), ((101, 8, 101, 26), 'numpy.arange', 'np.arange', ({(101, 18, 101, 19): '1', (101, 21, 101, 25): '1000'}, {}), '(1, 1000)', True, 'import numpy as np\n'), ((102, 9, 102, 33), 'numpy.array', 'np.array', ({(102, 18, 102, 32): '[10, 340, 523]'}, {}), '([10, 340, 523])', True, 'import numpy as np\n'), ((103, 9, 103, 28), 'numpy.array', 'np.array', ({(103, 18, 103, 27): '[3, 2, 1]'}, {}), '([3, 2, 1])', True, 'import numpy as np\n'), ((107, 8, 107, 25), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((108, 9, 108, 28), 'scipy.signal.argrelmax', 'signal.argrelmax', ({(108, 26, 108, 27): 'z'}, {}), '(z)', False, 'from scipy import signal\n'), ((113, 14, 113, 55), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'from matplotlib import pyplot as plt\n'), ((18, 23, 18, 32), 'numpy.abs', 'np.abs', ({(18, 30, 18, 31): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((19, 33, 19, 59), 'numpy.expand_dims', 'np.expand_dims', ({(19, 48, 19, 54): 'offset', (19, 56, 19, 58): '-1'}, {}), '(offset, -1)', True, 'import numpy as np\n'), ((105, 8, 105, 40), 'numpy.take', 'np.take', (), '', True, 'import numpy as np\n'), ((100, 16, 100, 40), 'numpy.linspace', 'np.linspace', ({(100, 28, 100, 29): '0', (100, 31, 100, 33): '10', (100, 35, 100, 39): '1000'}, {}), '(0, 10, 1000)', True, 'import numpy as np\n'), ((31, 10, 31, 25), 'numpy.finfo', 'np.finfo', ({(31, 19, 31, 24): 'float'}, {}), '(float)', True, 'import numpy as np\n')] |
shrine-maiden-heavy-industries/arachne | arachne/hdl/xilinx/ps8/resources/pmu.py | 1d0320bf6e77653656f8ce1874900743452dbac4 | # SPDX-License-Identifier: BSD-3-Clause
from amaranth import *
from amaranth.build import *
from .common import PS8Resource, MIOSet
__all__ = (
'PMUResource',
)
class PMUResource(PS8Resource):
name = 'pmu'
claimable_mio = [ ]
def __init__(self):
super().__init__(0, 0, None, False)
def used_mio(self, **kwargs):
raise NotImplementedError # :nocov:
def generate_mapping(self, **kwargs):
raise NotImplementedError # :nocov:
| [] |
henrikhorluck/tdt4140-washlists | backend/Washlist/tests.py | a75c3bc38a3f915eb48cf3e9ecba848f46a2bcaa | from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from Dormroom.models import Dormroom
from SIFUser.mixins import AuthTestMixin
from StudentVillage.models import StudentVillage
from Washlist.jobs import reset_washlists
from Washlist.models.Templates import TemplateListItem, TemplateWashList
from Washlist.models.WashLists import ListItem
from Washlist.serializer import TemplateWashListSerializer
class WashListTemplateTest(TestCase):
room = None
def setUp(self):
village = StudentVillage.objects.create(name="Moholt")
self.room = Dormroom.objects.create(number=1, village=village)
temp_list = TemplateWashList.objects.create(title="Moholt")
village.templateWashList = temp_list
village.save()
def test_add_to_template_adds_to_each_list(self):
desc = "Vask badet"
temp_list = TemplateWashList.objects.get(title="Moholt")
TemplateListItem.objects.create(description=desc, washlist=temp_list).save()
self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description)
class WeeklyResetOfWashlistsTest(TestCase):
def setUp(self):
"""
        Create a Washlist item that is completed.
        The method also sets up a village and a room to relate the Washlist item to,
        in order to satisfy the db constraints
"""
village = StudentVillage.objects.create(name="Moholt")
self.room = Dormroom.objects.create(number=1, village=village)
temp_list = TemplateWashList.objects.create(title="Moholt")
village.templateWashList = temp_list
village.save()
self.item = ListItem.objects.create(
pk=1, dormroom=self.room, desc="Vask badet", completed=True
)
self.item.save()
def test_job_resets_items(self):
"""
        Test that the job to reset Washlist items, when run manually, actually resets the
        Washlist items in the database
"""
reset_washlists()
self.assertEqual(False, ListItem.objects.get(pk=1).completed)
class WashlistTemplateAPITest(AuthTestMixin):
def setUp(self):
super().setUp()
self.temp_list = TemplateWashList.objects.create(title="Moholt")
village = StudentVillage.objects.create(
name="Moholt", templateWashList=self.temp_list
)
self.room = Dormroom.objects.create(number=1, village=village)
self.item = ListItem.objects.create(
pk=1, dormroom=self.room, desc="Vask badet", completed=True
)
def test_get_template_list(self):
url = reverse("templatewashlist-list")
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data[0],
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Moholt")
).data,
)
def test_get_detail_template_list(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Moholt")
).data,
)
def test_add_template_washlist(self):
url = reverse("templatewashlist-list")
response = self.client.post(
url, {"title": "Tyholt", "village": 1}, HTTP_AUTHORIZATION=self.auth
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
response.data,
TemplateWashListSerializer(
TemplateWashList.objects.get(title="Tyholt")
).data,
)
def test_partial_update(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.patch(
url, {"title": "Berg"}, HTTP_AUTHORIZATION=self.auth
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
TemplateWashListSerializer(TemplateWashList.objects.get(title="Berg")).data,
)
def test_destroy(self):
url = reverse("templatewashlist-detail", args=[1])
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(TemplateWashList.objects.count(), 0)
| [((19, 18, 19, 62), 'StudentVillage.models.StudentVillage.objects.create', 'StudentVillage.objects.create', (), '', False, 'from StudentVillage.models import StudentVillage\n'), ((20, 20, 20, 70), 'Dormroom.models.Dormroom.objects.create', 'Dormroom.objects.create', (), '', False, 'from Dormroom.models import Dormroom\n'), ((22, 20, 22, 67), 'Washlist.models.Templates.TemplateWashList.objects.create', 'TemplateWashList.objects.create', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((28, 20, 28, 64), 'Washlist.models.Templates.TemplateWashList.objects.get', 'TemplateWashList.objects.get', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((42, 18, 42, 62), 'StudentVillage.models.StudentVillage.objects.create', 'StudentVillage.objects.create', (), '', False, 'from StudentVillage.models import StudentVillage\n'), ((43, 20, 43, 70), 'Dormroom.models.Dormroom.objects.create', 'Dormroom.objects.create', (), '', False, 'from Dormroom.models import Dormroom\n'), ((45, 20, 45, 67), 'Washlist.models.Templates.TemplateWashList.objects.create', 'TemplateWashList.objects.create', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((49, 20, 51, 9), 'Washlist.models.WashLists.ListItem.objects.create', 'ListItem.objects.create', (), '', False, 'from Washlist.models.WashLists import ListItem\n'), ((59, 8, 59, 25), 'Washlist.jobs.reset_washlists', 'reset_washlists', ({}, {}), '()', False, 'from Washlist.jobs import reset_washlists\n'), ((66, 25, 66, 72), 'Washlist.models.Templates.TemplateWashList.objects.create', 'TemplateWashList.objects.create', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((67, 18, 69, 9), 'StudentVillage.models.StudentVillage.objects.create', 'StudentVillage.objects.create', (), '', False, 'from StudentVillage.models import StudentVillage\n'), ((70, 20, 70, 70), 'Dormroom.models.Dormroom.objects.create', 'Dormroom.objects.create', (), '', False, 'from Dormroom.models import Dormroom\n'), ((71, 20, 73, 9), 'Washlist.models.WashLists.ListItem.objects.create', 'ListItem.objects.create', (), '', False, 'from Washlist.models.WashLists import ListItem\n'), ((76, 14, 76, 46), 'django.urls.reverse', 'reverse', ({(76, 22, 76, 45): '"""templatewashlist-list"""'}, {}), "('templatewashlist-list')", False, 'from django.urls import reverse\n'), ((88, 14, 88, 58), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((100, 14, 100, 46), 'django.urls.reverse', 'reverse', ({(100, 22, 100, 45): '"""templatewashlist-list"""'}, {}), "('templatewashlist-list')", False, 'from django.urls import reverse\n'), ((114, 14, 114, 58), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((126, 14, 126, 58), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((130, 25, 130, 57), 'Washlist.models.Templates.TemplateWashList.objects.count', 'TemplateWashList.objects.count', ({}, {}), '()', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((30, 8, 30, 77), 'Washlist.models.Templates.TemplateListItem.objects.create', 'TemplateListItem.objects.create', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((31, 31, 31, 71), 'Washlist.models.WashLists.ListItem.objects.get', 'ListItem.objects.get', (), '', False, 'from Washlist.models.WashLists import 
ListItem\n'), ((60, 32, 60, 58), 'Washlist.models.WashLists.ListItem.objects.get', 'ListItem.objects.get', (), '', False, 'from Washlist.models.WashLists import ListItem\n'), ((83, 16, 83, 60), 'Washlist.models.Templates.TemplateWashList.objects.get', 'TemplateWashList.objects.get', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((95, 16, 95, 60), 'Washlist.models.Templates.TemplateWashList.objects.get', 'TemplateWashList.objects.get', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((109, 16, 109, 60), 'Washlist.models.Templates.TemplateWashList.objects.get', 'TemplateWashList.objects.get', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n'), ((122, 39, 122, 81), 'Washlist.models.Templates.TemplateWashList.objects.get', 'TemplateWashList.objects.get', (), '', False, 'from Washlist.models.Templates import TemplateListItem, TemplateWashList\n')] |
piyush01123/vision | torchvision/prototype/models/mobilenetv3.py | c6722307e6860057b4855483d237fe00a213dcf6 | from functools import partial
from typing import Any, Optional, List
from torchvision.prototype.transforms import ImageNetEval
from torchvision.transforms.functional import InterpolationMode
from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig
from ._api import WeightsEnum, Weights
from ._meta import _IMAGENET_CATEGORIES
from ._utils import handle_legacy_interface, _ovewrite_named_param
__all__ = [
"MobileNetV3",
"MobileNet_V3_Large_Weights",
"MobileNet_V3_Small_Weights",
"mobilenet_v3_large",
"mobilenet_v3_small",
]
def _mobilenet_v3(
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> MobileNetV3:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
_COMMON_META = {
"task": "image_classification",
"architecture": "MobileNetV3",
"publication_year": 2019,
"size": (224, 224),
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"interpolation": InterpolationMode.BILINEAR,
}
class MobileNet_V3_Large_Weights(WeightsEnum):
ImageNet1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"acc@1": 74.042,
"acc@5": 91.340,
},
)
ImageNet1K_V2 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 5483032,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
"acc@1": 75.274,
"acc@5": 92.566,
},
)
default = ImageNet1K_V2
class MobileNet_V3_Small_Weights(WeightsEnum):
ImageNet1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_COMMON_META,
"num_params": 2542856,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
"acc@1": 67.668,
"acc@5": 87.402,
},
)
default = ImageNet1K_V1
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.ImageNet1K_V1))
def mobilenet_v3_large(
*, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
weights = MobileNet_V3_Large_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.ImageNet1K_V1))
def mobilenet_v3_small(
*, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
weights = MobileNet_V3_Small_Weights.verify(weights)
inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
| [((54, 19, 54, 55), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((65, 19, 65, 72), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((80, 19, 80, 55), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n')] |
soul4code/django-rest-auth | rest_auth/registration/urls.py | b7a2e06e7736865b18f6aab79dcd42210e06c28b | from django.urls import re_path
from django.views.generic import TemplateView
from .views import RegisterView, VerifyEmailView
urlpatterns = [
re_path(r'^$', RegisterView.as_view(), name='rest_register'),
re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'),
# This url is used by django-allauth and empty TemplateView is
# defined just to allow reverse() call inside app, for example when email
# with verification link is being sent, then it's required to render email
# content.
# account_confirm_email - You should override this view to handle it in
# your API client somehow and then, send post to /verify-email/ endpoint
# with proper key.
# If you don't want to use API on that step, then just use ConfirmEmailView
# view from:
# django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py
re_path(r'^account-confirm-email/(?P<key>[-:\w]+)/$', TemplateView.as_view(),
name='account_confirm_email'),
]
| [((22, 58, 22, 80), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ({}, {}), '()', False, 'from django.views.generic import TemplateView\n')] |
mental689/paddict | crawler/tests.py | 493268b62531c698687d42416edf61c602250133 | from django.test import TestCase
# Create your tests here.
from crawler.download import *
from crawler.models import *
class AnimalDownloadTestCase(TestCase):
def setUp(self):
self.stopWords = ["CVPR 2019", "Computer Vision Foundation."]
self.url = "/Users/tuannguyenanh/Desktop/cvpr2019.html"#"http://openaccess.thecvf.com/CVPR2019.py"
self.root = "http://openaccess.thecvf.com/"
self.event = Event.objects.filter(shortname='CVPR2019').first()
if self.event is None:
self.event = Event(shortname='CVPR2019')
self.event.save()
def test_animal_can_download(self):
#print(get_html(self.url))
f = open(self.url)
soup = parse_html(f.read())
f.close()
f = open('cvpr2019.bib', 'w')
print(soup.title)
bibtexs = soup.find_all("div", attrs={"class": "bibref"})
#print(bibtexs)
for bib in bibtexs:
print(bib.text)
f.write(bib.text.replace('<br>', '\n'))
f.close()
| [] |
petervdb/testrep1 | test_scripts/xml_example.py | 76b6eb3de2deb9596c055f252191e28587d5520c | #!/usr/bin/python3
from urllib.request import urlopen
from xml.etree.ElementTree import parse
# Download the RSS feed and parse it
u = urlopen('http://planet.python.org/rss20.xml')
doc = parse(u)
# Extract and output tags of interest
for item in doc.iterfind('channel/item'):
title = item.findtext('title')
date = item.findtext('pubDate')
link = item.findtext('link')
print(title)
print(date)
print(link)
print()
print("Program executed.")
| [((7, 4, 7, 49), 'urllib.request.urlopen', 'urlopen', ({(7, 12, 7, 48): '"""http://planet.python.org/rss20.xml"""'}, {}), "('http://planet.python.org/rss20.xml')", False, 'from urllib.request import urlopen\n'), ((8, 6, 8, 14), 'xml.etree.ElementTree.parse', 'parse', ({(8, 12, 8, 13): 'u'}, {}), '(u)', False, 'from xml.etree.ElementTree import parse\n')] |
anthowen/duplify | contacts/urls.py | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | """dedupper_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from contacts import views
admin.autodiscover()
urlpatterns = [
path('', views.index, name='contact_index'),
path('', views.index, name='lead_index'),
path('contacts/', views.contacts, name='contacts'),
path('leads/', views.leads, name='leads'),
path('table/', views.table, name='table'),
path('plotly/', views.plotly, name='plotly'),
# url(r'^keys', views.upload, name='keys'),
# path('key-gen/', views.key_gen, name='key-gen'),
# path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'),
# path('run/', views.run, name='run'),
# path('sorted/<id>', views.merge, name='merge'),
# path('sorted/export/<type>', views.download, name='export'),
# path('sorted/report/<type>', views.download_times, name='report'),
]
| [((21, 0, 21, 20), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ({}, {}), '()', False, 'from django.contrib import admin\n'), ((24, 4, 24, 47), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((25, 4, 25, 44), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((26, 4, 26, 54), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((27, 4, 27, 45), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((28, 4, 28, 45), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((29, 4, 29, 48), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n')] |
klauer/pydm | pydm/PyQt/uic.py | e26aad58a7a0eb6f7321c61aa1dace646ff652bd | from . import qtlib
QT_LIB = qtlib.QT_LIB
if QT_LIB == 'PyQt5':
from PyQt5.uic import *
| [] |
pranaynanda/training-data-analyst | CPB100/lab2b/scheduled/ingestapp.py | f10ab778589129239fd5b277cfdefb41638eded5 | #!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
import os
import logging
import transform
import flask
import google.cloud.storage as gcs
# [start config]
app = flask.Flask(__name__)
# Configure this environment variable via app.yaml
CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
#
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# [end config]
@app.route('/')
def welcome():
return '<html><a href="ingest">ingest last week</a> earthquake data</html>'
@app.route('/ingest')
def ingest_last_week():
try:
# verify that this is a cron job request
is_cron = flask.request.headers['X-Appengine-Cron']
logging.info('Received cron request {}'.format(is_cron))
# create png
url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv'
outfile = 'earthquakes.png'
status = 'scheduled ingest of {} to {}'.format(url, outfile)
logging.info(status)
transform.create_png(url, outfile)
# upload to cloud storage
client = gcs.Client()
bucket = client.get_bucket(CLOUD_STORAGE_BUCKET)
blob = gcs.Blob('earthquakes/earthquakes.png', bucket)
blob.upload_from_filename(outfile)
# change permissions
blob.make_public()
status = 'uploaded {} to {}'.format(outfile, blob.name)
logging.info(status)
except KeyError as e:
status = '<html>Sorry, this capability is accessible only by the Cron service, but I got a KeyError for {} -- try invoking it from <a href="{}"> the GCP console / AppEngine / taskqueues </a></html>'.format(
e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON')
logging.info('Rejected non-Cron request')
return status
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# [END app]
| [((25, 6, 25, 27), 'flask.Flask', 'flask.Flask', ({(25, 18, 25, 26): '__name__'}, {}), '(__name__)', False, 'import flask\n'), ((29, 0, 29, 76), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((73, 4, 73, 60), 'logging.exception', 'logging.exception', ({(73, 22, 73, 59): '"""An error occurred during a request."""'}, {}), "('An error occurred during a request.')", False, 'import logging\n'), ((49, 8, 49, 28), 'logging.info', 'logging.info', ({(49, 21, 49, 27): 'status'}, {}), '(status)', False, 'import logging\n'), ((50, 8, 50, 42), 'transform.create_png', 'transform.create_png', ({(50, 29, 50, 32): 'url', (50, 34, 50, 41): 'outfile'}, {}), '(url, outfile)', False, 'import transform\n'), ((53, 17, 53, 29), 'google.cloud.storage.Client', 'gcs.Client', ({}, {}), '()', True, 'import google.cloud.storage as gcs\n'), ((55, 15, 55, 62), 'google.cloud.storage.Blob', 'gcs.Blob', ({(55, 24, 55, 53): '"""earthquakes/earthquakes.png"""', (55, 55, 55, 61): 'bucket'}, {}), "('earthquakes/earthquakes.png', bucket)", True, 'import google.cloud.storage as gcs\n'), ((61, 8, 61, 28), 'logging.info', 'logging.info', ({(61, 21, 61, 27): 'status'}, {}), '(status)', False, 'import logging\n'), ((66, 8, 66, 49), 'logging.info', 'logging.info', ({(66, 21, 66, 48): '"""Rejected non-Cron request"""'}, {}), "('Rejected non-Cron request')", False, 'import logging\n')] |
StarSky1/microsoft-python-study | index.py | 7fdc1ad87ac0eeb497013d7792f499416aac32d9 | name=input('input your name:');
print('hello');
print(name.capitalize()); | [] |
Machel54/-pass-locker- | credentials.py | 8ddf14cf755924ca903919177f9f878f65a08042 | import pyperclip
import random
import string
class Credential:
'''
class that generates new credentials
'''
credential_list = []
def __init__(self,username,sitename,password):
self.username = username
self.password = password
self.sitename = sitename
def save_credential(self):
'''
save_cred method saves the user objects into creds_list
'''
Credential.credential_list.append(self)
@classmethod
def display_credential(cls, user_name):
'''
Class method to show the list of credentials saved
'''
users_credential_list = []
for credential in cls.credential_list:
if credential.username == user_name:
users_credential_list.append(credential)
return users_credential_list
def delete_credential(self):
'''
delete_contact method deletes a saved credential from the credential_list
'''
Credential.credential_list.remove(self)
def generate_password(self):
'''
Function to generate a password where a user can generate a password based on their length of choice
'''
chars = "abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|"
password = ""
print("Use Char list = %s \n" % chars)
length = int(input("[*] Input Password Length: "))
while len(password) != length:
password = password + random.choice(chars)
if len(password) == length:
print("Password: %s" % password)
return password
@classmethod
def find_by_sitename(cls, sitename):
'''
Class method that takes a site name and returns the credential that matches that site
'''
for credential in cls.credential_list:
if credential.sitename == sitename:
return credential
@classmethod
def copy_credential(cls, sitename):
'''
Class method that copies a credentials details after the credentials sitename has been entered
'''
find_credential = Credential.find_by_sitename(sitename)
return pyperclip.copy(find_credential.password)
@classmethod
def credential_exist(cls, sitename):
'''
Method that checks if user exists from the credential list.
Returns:
Boolean: True or false depending if the credential exits
'''
the_credential = ""
for credential in Credential.credential_list:
if (credential.sitename == sitename):
the_credential = sitename
return the_credential
| [((67, 13, 67, 53), 'pyperclip.copy', 'pyperclip.copy', ({(67, 28, 67, 52): 'find_credential.password'}, {}), '(find_credential.password)', False, 'import pyperclip\n'), ((49, 35, 49, 55), 'random.choice', 'random.choice', ({(49, 49, 49, 54): 'chars'}, {}), '(chars)', False, 'import random\n')] |
mehulsatardekar/dice-on-demand | tests/test_dice.py | fa1ce1214975ba70c5d61390408a4de2418cf997 | import unittest
import app
def test_test():
assert app.test() == "Works!"
| [((6, 11, 6, 21), 'app.test', 'app.test', ({}, {}), '()', False, 'import app\n')] |
ChristopherWilks/snaptron | annotations/rip_annotated_junctions.py | 82ea3c5c5f2fbb726bba6d8c2bd0f7713291833a | #!/usr/bin/env python
"""
rip_annotated_junctions.py
Non-reference/species verson of this script, no lift-over
Rips junctions from annotation files contained in
jan_24_2016_annotations.tar.gz, as described in annotation_definition.md.
Junctions are dumped to stdout, which we record as annotated_junctions.tsv.gz
in runs/sra (same directory as this file). annotated_junctions.tsv.gz is
required by tables.py. The format of annotated_junctions.tsv.gz is
(tab-separated fields), one per junction
1. Chromosome
2. Start position (1-based, inclusive)
3. End position (1-based, inclusive)
4. Strand (+ or -)
5. anno source (abbreviation)
Must have
Stats are written to stderr
From the runs/sra/v2 directory, we ran
pypy rip_annotated_junctions.py
--hisat2-dir /path/to/hisat2-2.0.1-beta
--annotations path/to/jan_24_2016_annotations.tar.gz
| sort -k1,1 -k2,2n -k3,3n | gzip >annotated_junctions.tsv.gz
"""
import subprocess
import tarfile
import argparse
import tempfile
import atexit
import shutil
import glob
import os
import gzip
import sys
#file2source = {"hg19/gencode.v19.annotation.gtf.gz":"gC19","hg19/refGene.txt.gz":"rG19","hg19/acembly.txt.gz":"aC19","hg19/ccdsGene.txt.gz":"cG19","hg19/vegaGene.txt.gz":"vG19","hg19/knownGene.txt.gz":"kG19","hg19/mgcGenes.txt.gz":"mG19","hg19/lincRNAsTranscripts.txt.gz":"lR19","hg19/sibGene.txt.gz":"sG19","hg38/refGene.txt.gz":"rG38","hg38/ccdsGene.txt.gz":"cG38","hg38/gencode.v24.annotation.gtf.gz":"gC38","hg38/knownGene.txt.gz":"kG38","hg38/mgcGenes.txt.gz":"mG38","hg38/lincRNAsTranscripts.txt.gz":"lR38","hg38/sibGene.txt.gz":"sG38"}
#file2source = {"mm10/mouse10_ucsc_genes.gtf.gz":"kG10","mm10/mouse10_gencodevm11_comp.gtf.gz":"gC11","mm10/mouse10_gencodevm09_comp.gtf.gz":"gC09","mm10/mouse10_refseq_refgene.gtf.gz":"rG10"}
file2source = {"mouse10_ucsc_genes.gtf.gz":"kG10","mouse10_gencodevm11_comp.gtf.gz":"gC11","mouse10_gencodevm09_comp.gtf.gz":"gC09","mouse10_refseq_refgene.gtf.gz":"rG10"}
if __name__ == '__main__':
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Add command-line arguments
parser.add_argument('--extract-script-dir', type=str, required=True,
help=('path to directory containing extract_splice_sites.py script (from HISAT2)')
)
parser.add_argument('--annotations', type=str, required=True,
help=('full path to directory that has the annotation GTF(s) in gzipped format')
)
args = parser.parse_args()
extract_destination = tempfile.mkdtemp()
atexit.register(shutil.rmtree, extract_destination)
#with tarfile.open(args.annotations, 'r:gz') as tar:
# tar.extractall(path=extract_destination)
extract_splice_sites_path = os.path.join(args.extract_script_dir,
'extract_splice_sites.py')
containing_dir = os.path.dirname(os.path.realpath(__file__))
annotated_junctions_ = set()
for junction_file in glob.glob(
os.path.join(args.annotations, '*')
):
label = os.path.basename(junction_file)
datasource_code = file2source[label]
unique_junctions = set()
#extract_splice_sites_path prints 0-based, exon coords around junctions
#hence the +2 for the start here
extract_process = subprocess.Popen(' '.join([
sys.executable,
extract_splice_sites_path,
'<(gzip -cd %s)'
% junction_file
]),
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE
)
for line in extract_process.stdout:
tokens = line.strip().split('\t')
tokens[1] = int(tokens[1]) + 2
tokens[2] = int(tokens[2])
if tokens[2] < tokens[1]:
print >>sys.stderr, (
'Invalid junction ({}, {}, {}) from file {}. '
'Skipping.'
).format(
tokens[0], tokens[1], tokens[2], junction_file
)
continue
tokens.append(datasource_code)
junction_to_add = tuple(tokens)
annotated_junctions_.add(junction_to_add)
unique_junctions.add(junction_to_add)
extract_process.stdout.close()
exit_code = extract_process.wait()
if exit_code != 0:
raise RuntimeError(
'extract_splice_sites.py had nonzero exit code {}.'.format(
exit_code
)
)
print >>sys.stderr, 'Junctions in {}: {}'.format(
label,
len(unique_junctions)
)
junc2datasource = {}
for junction in annotated_junctions_:
if junction[:4] not in junc2datasource:
junc2datasource[junction[:4]]=set()
junc2datasource[junction[:4]].add(junction[4])
seen = set()
for junction in annotated_junctions_:
if junction[:4] not in seen:
sources = ",".join(sorted(junc2datasource[junction[:4]]))
print "%s\t%s" % ('\t'.join(map(str, junction[:4])),sources)
seen.add(junction[:4])
| [] |
Genlovy-Hoo/dramkit | dramkit/_tmp/VMD.py | fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7 | # -*- coding: utf-8 -*-
import numpy as np
def vmd( signal, alpha, tau, K, DC, init, tol):
'''
用VMD分解算法时只要把信号输入进行分解就行了,只是对信号进行分解,和采样频率没有关系,
VMD的输入参数也没有采样频率。
VMD分解出的各分量在输出量 u 中,这个和信号的长度、信号的采样频率没有关系。
迭代时各分量的中心频率在输出量omega,可以用2*pi/fs*omega求出中心频率,
但迭代时的频率是变化的。
Input and Parameters:
signal - the time domain signal (1D) to be decomposed
alpha - the balancing parameter of the data-fidelity constraint
tau - time-step of the dual ascent ( pick 0 for noise-slack )
K - the number of modes to be recovered
DC - true if the first mode is put and kept at DC (0-freq)
init - 0 = all omegas start at 0
1 = all omegas start uniformly distributed
2 = all omegas initialized randomly
tol - tolerance of convergence criterion; typically around 1e-6
Output:
u - the collection of decomposed modes
u_hat - spectra of the modes
omega - estimated mode center-frequencies
'''
# Period and sampling frequency of input signal
#分解算法中的采样频率和时间是标准化的,分解信号的采样时间为1s,然后就得到相应的采样频率。采样时间间隔:1/ length(signal),频率: length(signal)。
save_T = len(signal)
fs = 1 / save_T
# extend the signal by mirroring镜像延拓
T = save_T
f_mirror = []
temp = signal[0:T//2]
f_mirror.extend(temp[::-1]) #temp[::-1] 倒序排列
f_mirror.extend(signal)
temp = signal[T//2:T]
f_mirror.extend(temp[::-1])
f = f_mirror
# Time Domain 0 to T (of mirrored signal)
T = len(f)
t = [(i + 1) / T for i in range(T)] # 列表从1开始
# Spectral Domain discretization
#freqs 进行移位是由于进行傅里叶变换时,会有正负对称的频率,分析时一般只有正频率,所以看到的频谱图是没有负频率的
freqs = np.array( [i - 0.5 - 1 / T for i in t] )
# Maximum number of iterations (if not converged yet, then it won't anyway)
N = 500
# For future generalizations: individual alpha for each mode
Alpha = alpha * np.ones(K)
# Construct and center f_hat
transformed = np.fft.fft(f) # 使用fft函数对信号进行快速傅里叶变换。
f_hat = np.fft.fftshift(transformed) # 使用fftshift函数进行移频操作。
f_hat_plus = f_hat
f_hat_plus[0:T // 2] = 0
# f_hat_plus[0:T] = 1 #????????????????????????????////////////
# matrix keeping track of every iterant // could be discarded for mem
u_hat_plus = [np.zeros((N, len(freqs)), dtype=complex) for i in range(K)]
# Initialization of omega_k
omega_plus = np.zeros((N, K))
if init == 1:
for i in range(K):
omega_plus[0, i] = (0.5 / K) * i
elif init == 2:
omega_plus[0, :] = np.sort(np.exp(np.log(fs) + (np.log(0.5) - np.log(fs)) * np.random.rand(K)))
else:
omega_plus[0, :] = 0
# if DC mode imposed, set its omega to 0
if DC:
omega_plus[0, 0] = 0
# start with empty dual variables
lambda_hat = np.zeros( (N, len(freqs)), dtype=complex)
# other inits
eps = 2.2204e-16 # python里没有eps功能
uDiff = tol + eps # update step
n = 1 # loop counter
sum_uk = 0 # accumulator
#----------- Main loop for iterative updates----------
while (uDiff > tol and n < N ): #not converged and below iterations limit
#update first mode accumulator
k = 0
sum_uk = u_hat_plus[K-1][n-1,:]+ sum_uk - u_hat_plus[0][n-1,:] #sum_uk 一直都等于0(1,2000)????????????????
#update spectrum of first mode through Wiener filter of residuals
u_hat_plus[k][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k]*(freqs - omega_plus[n-1,k])**2)
#update first omega if not held at 0
if not DC:
omega_plus[n,k] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k][n,T//2:T])**2)
#update of any other mode
for k in range(K-1):
#accumulator
sum_uk = u_hat_plus[k][n,:] + sum_uk - u_hat_plus[k+1][n-1,:]
#mode spectrum
u_hat_plus[k+1][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k+1]*(freqs - omega_plus[n-1,k+1])**2)
#center frequencies
omega_plus[n,k+1] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k+1][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k+1][n,T//2:T])**2)
#Dual ascent
lambda_hat[n,:] = lambda_hat[n-1,:] + tau*(np.sum([ u_hat_plus[i][n,:] for i in range(K)],0) - f_hat_plus)
#loop counter
n = n+1
#converged yet?
uDiff = eps
for i in range(K):
uDiff = uDiff + 1/T*(u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:])*np.mat((u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:]).conjugate()).H
uDiff = np.abs(uDiff)
# ------ Postprocessing and cleanup-------
#discard empty space if converged early
N = min(N,n)
omega = omega_plus[0:N,:]
#Signal reconstruction
u_hat = np.zeros((T, K), dtype=complex)
temp = [u_hat_plus[i][N-1,T//2:T] for i in range(K) ]
u_hat[T//2:T,:] = np.squeeze(temp).T
temp = np.squeeze(np.mat(temp).conjugate())
u_hat[1:(T//2+1),:] = temp.T[::-1]
u_hat[0,:] = (u_hat[-1,:]).conjugate()
u = np.zeros((K,len(t)))
for k in range(K):
u[k,:]=np.real(np.fft.ifft(np.fft.ifftshift(u_hat[:,k])))
#remove mirror part
u = u[:,T//4:3*T//4]
#recompute spectrum
u_hat = np.zeros((T//2, K), dtype=complex)
for k in range(K):
u_hat[:,k]= np.squeeze( np.mat( np.fft.fftshift(np.fft.fft(u[k,:])) ).H)
return u, u_hat, omega
| [((50, 12, 50, 52), 'numpy.array', 'np.array', ({(50, 22, 50, 50): '[(i - 0.5 - 1 / T) for i in t]'}, {}), '([(i - 0.5 - 1 / T) for i in t])', True, 'import numpy as np\n'), ((57, 18, 57, 31), 'numpy.fft.fft', 'np.fft.fft', ({(57, 29, 57, 30): 'f'}, {}), '(f)', True, 'import numpy as np\n'), ((58, 12, 58, 40), 'numpy.fft.fftshift', 'np.fft.fftshift', ({(58, 28, 58, 39): 'transformed'}, {}), '(transformed)', True, 'import numpy as np\n'), ((66, 17, 66, 33), 'numpy.zeros', 'np.zeros', ({(66, 26, 66, 32): '(N, K)'}, {}), '((N, K))', True, 'import numpy as np\n'), ((124, 12, 124, 43), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((140, 12, 140, 46), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((55, 20, 55, 30), 'numpy.ones', 'np.ones', ({(55, 28, 55, 29): 'K'}, {}), '(K)', True, 'import numpy as np\n'), ((115, 16, 115, 29), 'numpy.abs', 'np.abs', ({(115, 23, 115, 28): 'uDiff'}, {}), '(uDiff)', True, 'import numpy as np\n'), ((126, 22, 126, 38), 'numpy.squeeze', 'np.squeeze', ({(126, 33, 126, 37): 'temp'}, {}), '(temp)', True, 'import numpy as np\n'), ((128, 22, 128, 34), 'numpy.mat', 'np.mat', ({(128, 29, 128, 33): 'temp'}, {}), '(temp)', True, 'import numpy as np\n'), ((136, 35, 136, 63), 'numpy.fft.ifftshift', 'np.fft.ifftshift', ({(136, 52, 136, 62): 'u_hat[:, (k)]'}, {}), '(u_hat[:, (k)])', True, 'import numpy as np\n'), ((72, 42, 72, 52), 'numpy.log', 'np.log', ({(72, 49, 72, 51): 'fs'}, {}), '(fs)', True, 'import numpy as np\n'), ((96, 99, 96, 130), 'numpy.abs', 'np.abs', ({(96, 106, 96, 129): 'u_hat_plus[k][(n), T // 2:T]'}, {}), '(u_hat_plus[k][(n), T // 2:T])', True, 'import numpy as np\n'), ((105, 103, 105, 136), 'numpy.abs', 'np.abs', ({(105, 110, 105, 135): 'u_hat_plus[k + 1][(n), T // 2:T]'}, {}), '(u_hat_plus[k + 1][(n), T // 2:T])', True, 'import numpy as np\n'), ((142, 56, 142, 74), 'numpy.fft.fft', 'np.fft.fft', ({(142, 67, 142, 73): 'u[(k), :]'}, {}), '(u[(k), :])', True, 'import numpy as np\n'), ((72, 84, 72, 101), 'numpy.random.rand', 'np.random.rand', ({(72, 99, 72, 100): 'K'}, {}), '(K)', True, 'import numpy as np\n'), ((72, 56, 72, 67), 'numpy.log', 'np.log', ({(72, 63, 72, 66): '0.5'}, {}), '(0.5)', True, 'import numpy as np\n'), ((72, 70, 72, 80), 'numpy.log', 'np.log', ({(72, 77, 72, 79): 'fs'}, {}), '(fs)', True, 'import numpy as np\n'), ((96, 52, 96, 84), 'numpy.abs', 'np.abs', ({(96, 59, 96, 83): 'u_hat_plus[k][(n), T // 2:T]'}, {}), '(u_hat_plus[k][(n), T // 2:T])', True, 'import numpy as np\n'), ((105, 54, 105, 88), 'numpy.abs', 'np.abs', ({(105, 61, 105, 87): 'u_hat_plus[k + 1][(n), T // 2:T]'}, {}), '(u_hat_plus[k + 1][(n), T // 2:T])', True, 'import numpy as np\n')] |
AoWangPhilly/PyDS | src/PyDS/Queue/Deque.py | d79f92d0d2e7c005ebb8fa9f631d5f01e590625e | class Deque:
def add_first(self, value):
...
def add_last(self, value):
...
def delete_first(self):
...
def delete_last(self):
...
def first(self):
...
def last(self):
...
def is_empty(self):
...
def __len__(self):
...
def __str__(self):
...
| [] |
asmaasalih/my_project | forum/main.py | 89183d7a2578fa302e94ea29570ab527e9ca47b5 | import models
import stores
member1 =models.Member("ahmed",33)
member2 =models.Member("mohamed",30)
post1=models.Post("Post1", "Content1")
post2= models.Post("Post2", "Content2")
post3= models.Post("Post3", "Content3")
#member store
member_store=stores.MemberStore()
member_store.add(member1)
member_store.add(member2)
print (member_store.get_all())
post_store=stores.PostStore()
post_store.add(post1)
post_store.add(post2)
post_store.add(post3)
print (post_store.get_all())
| [((5, 9, 5, 34), 'models.Member', 'models.Member', ({(5, 23, 5, 30): '"""ahmed"""', (5, 31, 5, 33): '33'}, {}), "('ahmed', 33)", False, 'import models\n'), ((6, 9, 6, 36), 'models.Member', 'models.Member', ({(6, 23, 6, 32): '"""mohamed"""', (6, 33, 6, 35): '30'}, {}), "('mohamed', 30)", False, 'import models\n'), ((8, 6, 8, 38), 'models.Post', 'models.Post', ({(8, 18, 8, 25): '"""Post1"""', (8, 27, 8, 37): '"""Content1"""'}, {}), "('Post1', 'Content1')", False, 'import models\n'), ((9, 7, 9, 39), 'models.Post', 'models.Post', ({(9, 19, 9, 26): '"""Post2"""', (9, 28, 9, 38): '"""Content2"""'}, {}), "('Post2', 'Content2')", False, 'import models\n'), ((10, 7, 10, 39), 'models.Post', 'models.Post', ({(10, 19, 10, 26): '"""Post3"""', (10, 28, 10, 38): '"""Content3"""'}, {}), "('Post3', 'Content3')", False, 'import models\n'), ((13, 13, 13, 33), 'stores.MemberStore', 'stores.MemberStore', ({}, {}), '()', False, 'import stores\n'), ((18, 11, 18, 29), 'stores.PostStore', 'stores.PostStore', ({}, {}), '()', False, 'import stores\n')] |
bhavyanshu/Shell-Finder | shellfind.py | 308b3ba7f1a53b8a6cc738d69c01f4b7108d0860 | #!/usr/bin/env python
'''
Author : Bhavyanshu Parasher
Email : [email protected]
Description : shellfind.py is a Python command line utility which lets you look for shells on a site that the hacker must have uploaded. It considers all the shells available and tries all possibilities via dictionary match.
'''
import socket
import sys
import httplib
from urlparse import urlparse
import time as t
import urllib2
from urllib2 import Request, urlopen, URLError
negative = '\033[91m'
positive = '\033[32m'
wait = '\033[95m'
final = '\033[93m'
total_scanned_global=0
found_scanned_global=0
def OpenLog(log_file_name):
try:
f = open(log_file_name, 'r')
return f.read()
f.close()
except IOError:
return "File" + log_file_name + "does not exist."
def main():
socket.setdefaulttimeout(10)
print wait+"\n## ------ Welcome to Shell Finder Utility - Developed by Bhavyanshu Parasher (http://bhavyanshu.github.io) | Apache License V2.0 | Project Source (https://github.com/bhavyanshu/Shell-Finder) ------ ##"
website_url = raw_input("\n\nEnter URL to scan ([eg, http://sitename.com or https://sitename.com/subdir ] | Do not add slash at the end of URL) : ")
parse_url=urlparse(website_url)
log_file_name = "LOG/"+parse_url.netloc+".log"
global total_scanned_global
global found_scanned_global
try:
try:
create=open(log_file_name,"w")
except:
print negative+"\nError generating log file. Please check directory access permissions."
print wait+"\nCreating a persistent connection to site "+website_url
conn = urllib2.Request(website_url)
urllib2.urlopen(website_url)
print positive+"Connected! Begining to scan for shells.."
except (urllib2.HTTPError) as Exit:
print negative+"\nEither the server is down or you are not connected to the internet."
exit()
try:
dictionary = open("dictionary","r")
except(IOError):
print negative+"Dictionary file not found_scanned_global. Please download the latest dictionary from github link"
exit()
keywords = dictionary.readlines()
for keys in keywords:
keys=keys.replace("\n","") #To replace newline with empty
New_URL = website_url+"/"+keys
print wait+">>>> "+New_URL
req=Request(New_URL)
try:
response = urlopen(req)
except URLError, e:
if hasattr(e,'reason'):
print negative+"Not found"
total_scanned_global = total_scanned_global+1
elif hasattr(e,'code'):
print negative+"Not found "
total_scanned_global = total_scanned_global+1
else:
try:
log_file=open(log_file_name,"a+") #Appending to it
except(IOError):
print negative+"Failed to create log file. Check dir permissions."
found_scanned_url=New_URL
print positive+"Possible shell found at ",found_scanned_url
log_file.writelines(found_scanned_url+"\n")
found_scanned_global=found_scanned_global+1
total_scanned_global=total_scanned_global+1
log_file.close()
print "\nTotal tries : ", total_scanned_global
print positive+"\nPossible shells: ",found_scanned_global
print final+"\nFollowing are the links to possible shells "
print OpenLog(log_file_name)
if __name__ == '__main__':
main()
| [] |
nosisky/algo-solution | question3.py | a9276f73ba63b1a0965c194885aea6cadfab0e0b | # A string S consisting of N characters is considered to be properly nested if any of the following conditions is true:
# S is empty;
# S has the form "(U)" or "[U]" or "{U}" where U is a properly nested string; S has the form "VW" where V and W are properly nested strings.
# For example, the string "{[()()]}" is properly nested but "([)()]" is not.
# Write a function:
# int solution(char *S);
# that, given a string S consisting of N characters, returns 1 if S is properly nested and 0 otherwise.
# For example, given S = "{[()()]}", the function should return 1 and given S = "([)()]", the function should return 0, as explained above.
# Assume that:
# N is an integer within the range [0..200,000];
# string S consists only of the following characters: "(", "{", "[", "]", "}" and/or ")". Complexity:
# expected worst-case time complexity is O(N);
# expected worst-case space complexity is O(N) (not counting the storage required for input arguments).
def solution(s):
sets = dict(zip('({[', ')}]'))
if(not isinstance(s, str)):
return "Invalid input"
collector = []
for bracket in s:
if(bracket in sets):
collector.append(sets[bracket])
elif bracket not in(sets.values()):
return "Invalid input"
elif (bracket != collector.pop()):
return False
return not collector
print(solution("()[]{}"))
| [] |
chrMenzel/A-beautiful-code-in-Python | Teil_27_Game_of_Life_3d.py | 92ee43c1fb03c299384d4de8bebb590c5ba1b623 | import bpy
import random as rnd
from collections import Counter
import itertools as iter
feld_von, feld_bis = -4, 4
spielfeld_von, spielfeld_bis = feld_von-6, feld_bis+6
anz = int((feld_bis-feld_von)**3*.3)
spielfeld = {(rnd.randint(feld_von, feld_bis), rnd.randint(
feld_von, feld_bis), rnd.randint(feld_von, feld_bis)) for _ in range(anz)}
animate_frame = 8
def nachbarn(pos):
for x,y,z in iter.product(range(-1,2), repeat = 3):
if z == y == x == 0: continue
yield pos[0]+x, pos[1]+y, pos[2]+z
def nächsteGeneration(spielfeld):
nachb = Counter([p for pos in spielfeld for p in nachbarn(pos)])
return {pos for pos, anz in nachb.items() if anz == 6 or (anz in (5, 6, 7, 8) and pos in spielfeld)}
def scale_rotate(ob, scale, rot, fr):
ob.scale = (scale, scale, scale)
ob.rotation_euler.rotate_axis("Z", rot)
ob.keyframe_insert(data_path='rotation_euler', frame=fr)
ob.keyframe_insert(data_path='scale', frame=fr)
bpy.ops.mesh.primitive_cube_add(size=0.001, location=(0, 0, 0))
orig_cube = bpy.context.active_object
n = "cube"
m = orig_cube.data.copy()
cubes = {}
for x,y,z in iter.product(range(spielfeld_von,spielfeld_bis), repeat = 3):
o = bpy.data.objects.new(n, m)
o.location = (x, y, z)
cubes[x, y, z] = o
bpy.context.collection.objects.link(o)
o.select_set(False)
for i in range(200):
print(f'Durchlauf No. {i}, Anz. Zellen = {len(spielfeld)}')
spielfeld2 = nächsteGeneration(spielfeld)
dead = spielfeld - spielfeld2
new = spielfeld2 - spielfeld
spielfeld = spielfeld2
if not new and not dead:
break
for zelle in new | dead:
if zelle not in cubes:
continue
ob = cubes[zelle]
if zelle in new:
scale_rotate(ob, 0.001, -3.141/2, (i-1)*animate_frame)
scale_rotate(ob, 750, 3.141/2, i * animate_frame)
else:
scale_rotate(ob, 750, 3.141/2, (i-1) * animate_frame)
scale_rotate(ob, 0.001, -3.141/2, i * animate_frame)
if not spielfeld:
break
bpy.context.scene.frame_current = 1
| [((34, 0, 34, 63), 'bpy.ops.mesh.primitive_cube_add', 'bpy.ops.mesh.primitive_cube_add', (), '', False, 'import bpy\n'), ((41, 6, 41, 32), 'bpy.data.objects.new', 'bpy.data.objects.new', ({(41, 27, 41, 28): 'n', (41, 30, 41, 31): 'm'}, {}), '(n, m)', False, 'import bpy\n'), ((44, 2, 44, 40), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', ({(44, 38, 44, 39): 'o'}, {}), '(o)', False, 'import bpy\n'), ((11, 14, 11, 45), 'random.randint', 'rnd.randint', ({(11, 26, 11, 34): 'feld_von', (11, 36, 11, 44): 'feld_bis'}, {}), '(feld_von, feld_bis)', True, 'import random as rnd\n'), ((11, 47, 12, 23), 'random.randint', 'rnd.randint', ({(12, 4, 12, 12): 'feld_von', (12, 14, 12, 22): 'feld_bis'}, {}), '(feld_von, feld_bis)', True, 'import random as rnd\n'), ((12, 25, 12, 56), 'random.randint', 'rnd.randint', ({(12, 37, 12, 45): 'feld_von', (12, 47, 12, 55): 'feld_bis'}, {}), '(feld_von, feld_bis)', True, 'import random as rnd\n')] |
openclimatefix/power_perceiver | power_perceiver/xr_batch_processor/reduce_num_pv_systems.py | bafcdfaf6abf42fbab09da641479f74709ddd395 | from dataclasses import dataclass
import numpy as np
import xarray as xr
from power_perceiver.load_prepared_batches.data_sources import PV
from power_perceiver.load_prepared_batches.data_sources.prepared_data_source import XarrayBatch
@dataclass
class ReduceNumPVSystems:
"""Reduce the number of PV systems per example to `requested_num_pv_systems`.
Randomly select PV systems for each example. If there are less PV systems available
than requested, then randomly sample with duplicates allowed.
This is implemented as an xr_batch_processor so it can run after
SelectPVSystemsNearCenterOfImage.
"""
requested_num_pv_systems: int
def __post_init__(self):
self.rng = np.random.default_rng() # Seeded by seed_rngs worker_init_function
def __call__(self, xr_batch: XarrayBatch) -> XarrayBatch:
pv_batch = xr_batch[PV]
num_examples = len(pv_batch.example)
selection = np.zeros(shape=(num_examples, self.requested_num_pv_systems), dtype=np.int32)
for example_i in range(num_examples):
pv_mask_for_example = pv_batch.pv_mask.isel(example=example_i).values
all_indicies = np.nonzero(pv_mask_for_example)[0]
# Only allow a PV system to be chosen multiple times for this example if there are
# less available PV systems than requested PV systems.
replace = len(all_indicies) < self.requested_num_pv_systems
chosen_indicies = self.rng.choice(
all_indicies, size=self.requested_num_pv_systems, replace=replace
)
selection[example_i] = chosen_indicies
selection = xr.DataArray(selection, dims=("example", "pv_system"))
pv_batch = pv_batch.isel(pv_system=selection)
xr_batch[PV] = pv_batch
return xr_batch
| [((24, 19, 24, 42), 'numpy.random.default_rng', 'np.random.default_rng', ({}, {}), '()', True, 'import numpy as np\n'), ((30, 20, 30, 97), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((42, 20, 42, 74), 'xarray.DataArray', 'xr.DataArray', (), '', True, 'import xarray as xr\n'), ((33, 27, 33, 58), 'numpy.nonzero', 'np.nonzero', ({(33, 38, 33, 57): 'pv_mask_for_example'}, {}), '(pv_mask_for_example)', True, 'import numpy as np\n')] |
wang153723482/HelloWorld_my | HelloWorld_python/log/demo_log_3.py | b8642ad9742f95cfebafc61f25b00e917485e50c | #encoding=utf8
# 按天生成文件
import logging
import time
from logging.handlers import TimedRotatingFileHandler
#----------------------------------------------------------------------
if __name__ == "__main__":
logFilePath = "timed_test.log"
logger = logging.getLogger("YouLoggerName")
logger.setLevel(logging.INFO)
handler = TimedRotatingFileHandler(logFilePath,
when="d",
interval=1,
backupCount=7)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
for i in range(6):
logger.info("This is a info!")
logger.debug("This is a debug!")
# time.sleep(61) | [((12, 13, 12, 47), 'logging.getLogger', 'logging.getLogger', ({(12, 31, 12, 46): '"""YouLoggerName"""'}, {}), "('YouLoggerName')", False, 'import logging\n'), ((15, 14, 18, 53), 'logging.handlers.TimedRotatingFileHandler', 'TimedRotatingFileHandler', (), '', False, 'from logging.handlers import TimedRotatingFileHandler\n'), ((19, 16, 19, 89), 'logging.Formatter', 'logging.Formatter', ({(19, 34, 19, 88): '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}, {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')", False, 'import logging\n')] |
Atsocs/bot-da-os | bot_da_os/statemachine/person/person_action.py | e6d54057f4a3b703f303e9944a39e291ac87c40f | from operator import eq
class PersonAction:
def __init__(self, action):
self.action = action
def __str__(self): return self.action
def __eq__(self, other):
return eq(self.action, other.action)
# Necessary when __cmp__ or __eq__ is defined
# in order to make this class usable as a
# dictionary key:
def __hash__(self):
return hash(self.action)
# Static fields; an enumeration of instances:
PersonAction.compliment = PersonAction("person compliments")
PersonAction.informing = PersonAction("person gives information about the service order")
PersonAction.query = PersonAction("person wants to know about his/her order")
PersonAction.angry = PersonAction("person is pissed off")
| [((11, 15, 11, 44), 'operator.eq', 'eq', ({(11, 18, 11, 29): 'self.action', (11, 31, 11, 43): 'other.action'}, {}), '(self.action, other.action)', False, 'from operator import eq\n')] |
bisw1jit/MyServer | MyServer.py | cbd7bc4015482ce8f24314894148f7e20ef66b21 | # Tool Name :- MyServer
# Author :- LordReaper
# Date :- 13/11/2018 - 9/11/2019
# Powered By :- H1ckPro Software's
import sys
import os
from time import sleep
from core.system import *
if len(sys.argv)>1:
pass
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
sys.exit()
if sys.argv[1]=="-s":
if len(sys.argv)==2:
if system=="ubuntu":
os.system("sudo python3 core/s.py "+sys.argv[1])
else:
os.system("python3 core/s.py "+sys.argv[1])
elif len(sys.argv)==3:
if sys.argv[2]=="apache":
if system=="ubuntu":
os.system("sudo python3 core/server.py -apa")
else:
os.system("python3 core/server.py -apa")
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif len(sys.argv)==6:
if sys.argv[2]=="-php":
if system=="ubuntu":
os.system("sudo python3 core/server.py -php "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
os.system("python3 core/server.py -php "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
elif sys.argv[2]=="-py":
if system=="ubuntu":
os.system("sudo python3 core/server.py -py "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
os.system("python3 core/server.py -py "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
elif sys.argv[2]=="-ng":
if system=="ubuntu":
os.system("sudo python3 core/server.py -ng "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
os.system("python3 core/server.py -ng "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif len(sys.argv)==5:
if system=="ubuntu":
os.system("sudo python3 core/server.py -d "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
os.system("python3 core/server.py -d "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif sys.argv[1]=="-h":
if len(sys.argv)==2:
if system=="ubuntu":
os.system("sudo python3 core/s.py "+sys.argv[1])
else:
os.system("python3 core/s.py "+sys.argv[1])
elif len(sys.argv)==5:
if system=="ubuntu":
os.system("sudo python3 core/host.py "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
os.system("python3 core/host.py "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
print ("error : invalid arguments")
print ("use : myserver --help for more information")
elif sys.argv[1]=="-db":
if len(sys.argv)==3:
if sys.argv[2]=="start":
if system=="ubuntu":
os.system("sudo python3 core/mysql.py "+sys.argv[2])
else:
os.system("python3 core/mysql.py "+sys.argv[2])
elif sys.argv[2]=="stop":
if system=="ubuntu":
os.system("sudo python3 core/mysql.py "+sys.argv[2])
else:
os.system("python3 core/mysql.py "+sys.argv[2])
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif sys.argv[1]=="rm":
if len(sys.argv)==3:
if sys.argv[2]=="-T" or sys.argv[2]=="-t":
if system=="ubuntu":
os.system("sudo python3 core/un.py")
else:
os.system("python3 core/un.py")
else:
print ("error : invalid arguments")
print ("use : myserver --help for more information")
else:
print ("error : invalid arguments")
print ("use : myserver --help for more information")
elif sys.argv[1]=="update":
if system=="ubuntu":
os.system("sudo python3 core/upd.py")
else:
os.system("python3 core/upd.py")
elif sys.argv[1]=="start":
if system=="ubuntu":
os.system("sudo python3 .MyServer.py")
else:
os.system("python3 .MyServer.py")
elif sys.argv[1]=="--help" or sys.argv[1]=="-help" or sys.argv[1]=="help":
print ("")
print ("Usage: myserver [command]... [arguments]...")
print ("")
print (" Commands:")
print (" -s <hostname> <port> <path> to start default localhost server.")
print (" -s -ng <hostname> <port> <path> to start php localhost server.")
print (" -s -php <hostname> <port> <path> to start php localhost server.")
print (" -s -py <hostname> <port> <path> to start python localhost server.")
print (" -h <hostname> <localhost_port> <port> to access localhost server on internet.")
print (" -db [start/stop] to start/stop MySQL database server.")
print (" -s apache to start apache web server.")
print (" update update MyServer.")
print (" rm -t uninstall MyServer.")
print (" start start MyServer menu.")
print ("")
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
| [((16, 2, 16, 12), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((22, 6, 22, 54), 'os.system', 'os.system', ({(22, 16, 22, 53): "('sudo python3 core/s.py ' + sys.argv[1])"}, {}), "('sudo python3 core/s.py ' + sys.argv[1])", False, 'import os\n'), ((24, 6, 24, 49), 'os.system', 'os.system', ({(24, 16, 24, 48): "('python3 core/s.py ' + sys.argv[1])"}, {}), "('python3 core/s.py ' + sys.argv[1])", False, 'import os\n'), ((68, 6, 68, 54), 'os.system', 'os.system', ({(68, 16, 68, 53): "('sudo python3 core/s.py ' + sys.argv[1])"}, {}), "('sudo python3 core/s.py ' + sys.argv[1])", False, 'import os\n'), ((70, 6, 70, 49), 'os.system', 'os.system', ({(70, 16, 70, 48): "('python3 core/s.py ' + sys.argv[1])"}, {}), "('python3 core/s.py ' + sys.argv[1])", False, 'import os\n'), ((29, 8, 29, 53), 'os.system', 'os.system', ({(29, 18, 29, 52): '"""sudo python3 core/server.py -apa"""'}, {}), "('sudo python3 core/server.py -apa')", False, 'import os\n'), ((31, 8, 31, 48), 'os.system', 'os.system', ({(31, 18, 31, 47): '"""python3 core/server.py -apa"""'}, {}), "('python3 core/server.py -apa')", False, 'import os\n'), ((74, 6, 74, 89), 'os.system', 'os.system', ({(74, 16, 74, 88): "('sudo python3 core/host.py ' + sys.argv[2] + ' ' + sys.argv[3] + ' ' + sys\n .argv[4])"}, {}), "('sudo python3 core/host.py ' + sys.argv[2] + ' ' + sys.argv[3] +\n ' ' + sys.argv[4])", False, 'import os\n'), ((76, 6, 76, 84), 'os.system', 'os.system', ({(76, 16, 76, 83): "('python3 core/host.py ' + sys.argv[2] + ' ' + sys.argv[3] + ' ' + sys.argv[4])"}, {}), "('python3 core/host.py ' + sys.argv[2] + ' ' + sys.argv[3] + ' ' +\n sys.argv[4])", False, 'import os\n'), ((39, 8, 39, 98), 'os.system', 'os.system', ({(39, 18, 39, 97): "('sudo python3 core/server.py -php ' + sys.argv[3] + ' ' + sys.argv[4] +\n ' ' + sys.argv[5])"}, {}), "('sudo python3 core/server.py -php ' + sys.argv[3] + ' ' + sys.\n argv[4] + ' ' + sys.argv[5])", False, 'import os\n'), ((41, 8, 41, 93), 'os.system', 'os.system', ({(41, 18, 41, 92): "('python3 core/server.py -php ' + sys.argv[3] + ' ' + sys.argv[4] + ' ' +\n sys.argv[5])"}, {}), "('python3 core/server.py -php ' + sys.argv[3] + ' ' + sys.argv[4] +\n ' ' + sys.argv[5])", False, 'import os\n'), ((58, 6, 58, 94), 'os.system', 'os.system', ({(58, 16, 58, 93): "('sudo python3 core/server.py -d ' + sys.argv[2] + ' ' + sys.argv[3] + ' ' +\n sys.argv[4])"}, {}), "('sudo python3 core/server.py -d ' + sys.argv[2] + ' ' + sys.argv[\n 3] + ' ' + sys.argv[4])", False, 'import os\n'), ((60, 6, 60, 89), 'os.system', 'os.system', ({(60, 16, 60, 88): "('python3 core/server.py -d ' + sys.argv[2] + ' ' + sys.argv[3] + ' ' + sys\n .argv[4])"}, {}), "('python3 core/server.py -d ' + sys.argv[2] + ' ' + sys.argv[3] +\n ' ' + sys.argv[4])", False, 'import os\n'), ((86, 8, 86, 60), 'os.system', 'os.system', ({(86, 18, 86, 59): "('sudo python3 core/mysql.py ' + sys.argv[2])"}, {}), "('sudo python3 core/mysql.py ' + sys.argv[2])", False, 'import os\n'), ((88, 8, 88, 55), 'os.system', 'os.system', ({(88, 18, 88, 54): "('python3 core/mysql.py ' + sys.argv[2])"}, {}), "('python3 core/mysql.py ' + sys.argv[2])", False, 'import os\n'), ((117, 4, 117, 41), 'os.system', 'os.system', ({(117, 14, 117, 40): '"""sudo python3 core/upd.py"""'}, {}), "('sudo python3 core/upd.py')", False, 'import os\n'), ((119, 4, 119, 36), 'os.system', 'os.system', ({(119, 14, 119, 35): '"""python3 core/upd.py"""'}, {}), "('python3 core/upd.py')", False, 'import os\n'), ((44, 8, 44, 97), 'os.system', 'os.system', ({(44, 18, 44, 96): "('sudo 
python3 core/server.py -py ' + sys.argv[3] + ' ' + sys.argv[4] + ' ' +\n sys.argv[5])"}, {}), "('sudo python3 core/server.py -py ' + sys.argv[3] + ' ' + sys.argv\n [4] + ' ' + sys.argv[5])", False, 'import os\n'), ((46, 8, 46, 92), 'os.system', 'os.system', ({(46, 18, 46, 91): "('python3 core/server.py -py ' + sys.argv[3] + ' ' + sys.argv[4] + ' ' +\n sys.argv[5])"}, {}), "('python3 core/server.py -py ' + sys.argv[3] + ' ' + sys.argv[4] +\n ' ' + sys.argv[5])", False, 'import os\n'), ((91, 8, 91, 60), 'os.system', 'os.system', ({(91, 18, 91, 59): "('sudo python3 core/mysql.py ' + sys.argv[2])"}, {}), "('sudo python3 core/mysql.py ' + sys.argv[2])", False, 'import os\n'), ((93, 8, 93, 55), 'os.system', 'os.system', ({(93, 18, 93, 54): "('python3 core/mysql.py ' + sys.argv[2])"}, {}), "('python3 core/mysql.py ' + sys.argv[2])", False, 'import os\n'), ((105, 8, 105, 44), 'os.system', 'os.system', ({(105, 18, 105, 43): '"""sudo python3 core/un.py"""'}, {}), "('sudo python3 core/un.py')", False, 'import os\n'), ((107, 8, 107, 39), 'os.system', 'os.system', ({(107, 18, 107, 38): '"""python3 core/un.py"""'}, {}), "('python3 core/un.py')", False, 'import os\n'), ((123, 4, 123, 42), 'os.system', 'os.system', ({(123, 14, 123, 41): '"""sudo python3 .MyServer.py"""'}, {}), "('sudo python3 .MyServer.py')", False, 'import os\n'), ((125, 5, 125, 38), 'os.system', 'os.system', ({(125, 15, 125, 37): '"""python3 .MyServer.py"""'}, {}), "('python3 .MyServer.py')", False, 'import os\n'), ((49, 8, 49, 97), 'os.system', 'os.system', ({(49, 18, 49, 96): "('sudo python3 core/server.py -ng ' + sys.argv[3] + ' ' + sys.argv[4] + ' ' +\n sys.argv[5])"}, {}), "('sudo python3 core/server.py -ng ' + sys.argv[3] + ' ' + sys.argv\n [4] + ' ' + sys.argv[5])", False, 'import os\n'), ((51, 8, 51, 92), 'os.system', 'os.system', ({(51, 18, 51, 91): "('python3 core/server.py -ng ' + sys.argv[3] + ' ' + sys.argv[4] + ' ' +\n sys.argv[5])"}, {}), "('python3 core/server.py -ng ' + sys.argv[3] + ' ' + sys.argv[4] +\n ' ' + sys.argv[5])", False, 'import os\n')] |
ffreemt/tmx2epub | tests/test_gen_epub.py | 55a59cb2a9b7f42031a65f64c29e5c43fdb487ea | """ test gen_epub. """
from tmx2epub.gen_epub import gen_epub
def test_gen_epub2():
""" test_gen_epub2. """
from pathlib import Path
infile = r"tests\2.tmx"
stem = Path(infile).absolute().stem
outfile = f"{Path(infile).absolute().parent / stem}.epub"
assert gen_epub(infile, debug=True) == outfile
# assert 0
| [((12, 11, 12, 39), 'tmx2epub.gen_epub.gen_epub', 'gen_epub', (), '', False, 'from tmx2epub.gen_epub import gen_epub\n'), ((10, 11, 10, 23), 'pathlib.Path', 'Path', ({(10, 16, 10, 22): 'infile'}, {}), '(infile)', False, 'from pathlib import Path\n'), ((11, 17, 11, 29), 'pathlib.Path', 'Path', ({(11, 22, 11, 28): 'infile'}, {}), '(infile)', False, 'from pathlib import Path\n')] |
amulyavarote/quickstarts | pub_sub/python/http/checkout/app.py | c21a8f58d515b28eaa8a3680388fa06995c2331b | import json
import time
import random
import logging
import requests
import os
logging.basicConfig(level=logging.INFO)
base_url = os.getenv('BASE_URL', 'http://localhost') + ':' + os.getenv(
'DAPR_HTTP_PORT', '3500')
PUBSUB_NAME = 'order_pub_sub'
TOPIC = 'orders'
logging.info('Publishing to baseURL: %s, Pubsub Name: %s, Topic: %s' % (
base_url, PUBSUB_NAME, TOPIC))
for i in range(1, 10):
order = {'orderId': i}
# Publish an event/message using Dapr PubSub via HTTP Post
result = requests.post(
url='%s/v1.0/publish/%s/%s' % (base_url, PUBSUB_NAME, TOPIC),
json=order
)
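    # Dapr's sidecar exposes publishing at /v1.0/publish/<pubsub-name>/<topic>;
    # a 204 response means the event was accepted. If delivery confirmation matters,
    # `result.status_code` could be checked here (omitted to keep the quickstart minimal).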
logging.info('Published data: ' + json.dumps(order))
time.sleep(1)
| [((8, 0, 8, 39), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((14, 0, 15, 42), 'logging.info', 'logging.info', ({(14, 13, 15, 41): "('Publishing to baseURL: %s, Pubsub Name: %s, Topic: %s' % (base_url,\n PUBSUB_NAME, TOPIC))"}, {}), "('Publishing to baseURL: %s, Pubsub Name: %s, Topic: %s' % (\n base_url, PUBSUB_NAME, TOPIC))", False, 'import logging\n'), ((10, 61, 11, 45), 'os.getenv', 'os.getenv', ({(11, 20, 11, 36): '"""DAPR_HTTP_PORT"""', (11, 38, 11, 44): '"""3500"""'}, {}), "('DAPR_HTTP_PORT', '3500')", False, 'import os\n'), ((21, 13, 24, 5), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((27, 4, 27, 17), 'time.sleep', 'time.sleep', ({(27, 15, 27, 16): '(1)'}, {}), '(1)', False, 'import time\n'), ((10, 11, 10, 52), 'os.getenv', 'os.getenv', ({(10, 21, 10, 31): '"""BASE_URL"""', (10, 33, 10, 51): '"""http://localhost"""'}, {}), "('BASE_URL', 'http://localhost')", False, 'import os\n'), ((25, 38, 25, 55), 'json.dumps', 'json.dumps', ({(25, 49, 25, 54): 'order'}, {}), '(order)', False, 'import json\n')] |
smailedge/pro | jj.py | f86347d4368bc97aa860b37caa9ba10e84a93738 | # -*- coding: utf-8 -*-
from linepy import *
from datetime import datetime
from time import sleep
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse
#==============================================================================#
botStart = time.time()
cl = LINE()
#cl = LINE("TOKEN KAMU")
#cl = LINE("Email","Password")
cl.log("Auth Token : " + str(cl.authToken))
channelToken = cl.getChannelResult()
cl.log("Channel Token : " + str(channelToken))
clMID = cl.profile.mid
clProfile = cl.getProfile()
lineSettings = cl.getSettings()
oepoll = OEPoll(cl)
#==============================================================================#
readOpen = codecs.open("read.json","r","utf-8")
settingsOpen = codecs.open("temp.json","r","utf-8")
read = json.load(readOpen)
settings = json.load(settingsOpen)
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
msg_dict = {}
bl = [""]
myProfile["displayName"] = clProfile.displayName
myProfile["statusMessage"] = clProfile.statusMessage
myProfile["pictureStatus"] = clProfile.pictureStatus
#==============================================================================#
def restartBot():
print ("[ INFO ] BOT RESETTED")
backupData()
python = sys.executable
os.execl(python, python, *sys.argv)
def backupData():
try:
backup = settings
f = codecs.open('temp.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
backup = read
f = codecs.open('read.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
return True
except Exception as error:
logError(error)
return False
def logError(text):
cl.log("[ ERROR ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
        error.write("\n[%s] %s" % (str(time_), text))
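# LINE encodes mentions in contentMetadata['MENTION'] as a JSON list of MENTIONEES,
# each carrying "S"/"E" (start/end character offsets into the message text) and
# "M" (the mid being mentioned); here S=0, E=3 covers the '@x ' placeholder below.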
def sendMessageWithMention(to, mid):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
text_ = '@x '
cl.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
def helpmessage():
helpMessage = """╔═════════════
╠♥ ✿✿✿ 十香の特製Bot ✿✿✿ ♥
╠SR 設定已讀點
╠LR 查看誰已讀
╠Nk @ 標註踢人
╠Nk 全部再見
╠══✪〘 其他功能略 〙✪═══
"""
return helpMessage
wait = {
"share":False,
"sender" :{},
}
admin =['ud5ff1dff426cf9e3030c7ac2a61512f0','ua10c2ad470b4b6e972954e1140ad1891',clMID]
owners = ["ua10c2ad470b4b6e972954e1140ad1891","ud5ff1dff426cf9e3030c7ac2a61512f0"]
#if clMID not in owners:
# python = sys.executable
# os.execl(python, python, *sys.argv)
#==============================================================================#
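# Main dispatcher: lineBot() is called for every Operation polled in the loop at the
# bottom of this file and branches on op.type (e.g. 13 = group invite, 25/26 = message
# sent/received, 55 = read receipt, 65 = message recall).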
def lineBot(op):
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 5:
print ("[ 5 ] NOTIFIED ADD CONTACT")
if settings["autoAdd"] == True:
cl.sendMessage(op.param1, "感謝您加入本帳為好友w".format(str(cl.getContact(op.param1).displayName)))
if op.type == 13:
print ("[ 13 ] NOTIFIED INVITE GROUP")
group = cl.getGroup(op.param1)
if settings["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
if op.type == 19:
if op.param2 not in owners:
if op.param2 in owners:
pass
elif wait["protect"] == True:
settings["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendMessage(op.param1,"")
else:
cl.sendMessage(op.param1,"")
if op.type == 24:
print ("[ 24 ] NOTIFIED LEAVE ROOM")
if settings["autoLeave"] == True:
cl.leaveRoom(op.param1)
if op.type == 25 or op.type == 26:
K0 = admin
msg = op.message
if wait["share"] == True:
K0 = msg._from
else:
K0 = admin
# if op.type == 25:
# to = msg.to
# receiver = str(to.displayName)
# print ("send" + receiver + str(text.lower()))
# if op.type == 26:
# to = msg._from
# sender = str(to.displayName)
# print ("receiver" + sender + str(text.lower()))
if op.type == 26 or op.type == 25:
print ("[ 25 ] SEND MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
if sender in K0:
if text.lower() == 'help':
helpMessage = helpmessage()
cl.sendMessage(to, str(helpMessage))
cl.sendContact(to,"u0a59c278b1529476ddb210cb5e827ffc")
cl.sendContact(to,"ufb30e2203f44bc7b72e28b09a88c9bbd")
#==============================================================================#
elif text.lower() == 'speed':
start = time.time()
cl.sendMessage(to, "計算中...")
elapsed_time = time.time() - start
cl.sendMessage(to,format(str(elapsed_time)))
elif text.lower() == 'restart':
cl.sendMessage(to, "重新啟動中...")
time.sleep(5)
cl.sendMessage(to, "重啟成功,請重新登入")
restartBot()
elif text.lower() == 'runtime':
timeNow = time.time()
runtime = timeNow - botStart
runtime = format_timespan(runtime)
cl.sendMessage(to, "系統已運作 {}".format(str(runtime)))
elif text.lower() == 'about':
try:
arr = []
owner = "ua10c2ad470b4b6e972954e1140ad1891"
creator = cl.getContact(owner)
contact = cl.getContact(clMID)
grouplist = cl.getGroupIdsJoined()
contactlist = cl.getAllContactIds()
blockedlist = cl.getBlockedContactIds()
ret_ = "╔══[ 關於使用者 ]"
ret_ += "\n╠ 使用者名稱 : {}".format(contact.displayName)
ret_ += "\n╠ 群組數 : {}".format(str(len(grouplist)))
ret_ += "\n╠ 好友數 : {}".format(str(len(contactlist)))
ret_ += "\n╠ 已封鎖 : {}".format(str(len(blockedlist)))
ret_ += "\n╠══[ 關於本bot ]"
ret_ += "\n╠ 版本 : 最新"
ret_ += "\n╠ 製作者 : {}".format(creator.displayName)
ret_ += "\n╚══[ 感謝您的使用 ]"
cl.sendMessage(to, str(ret_))
except Exception as e:
cl.sendMessage(msg.to, str(e))
#==============================================================================#
elif text.lower() == 'set':
try:
ret_ = "╔══[ 狀態 ]"
if settings["autoAdd"] == True: ret_ += "\n╠ Auto Add ✅"
else: ret_ += "\n╠ Auto Add ❌"
if settings["autoJoin"] == True: ret_ += "\n╠ Auto Join ✅"
else: ret_ += "\n╠ Auto Join ❌"
if settings["autoLeave"] == True: ret_ += "\n╠ Auto Leave ✅"
else: ret_ += "\n╠ Auto Leave ❌"
if settings["autoRead"] == True: ret_ += "\n╠ Auto Read ✅"
else: ret_ += "\n╠ Auto Read ❌"
if settings["reread"] ==True: ret_+="\n╠ Reread ✅"
else: ret_ += "\n╠ Reread ❌"
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif text.lower() == 'autoadd on':
settings["autoAdd"] = True
cl.sendMessage(to, "Auto Add on success")
elif text.lower() == 'autoadd off':
settings["autoAdd"] = False
cl.sendMessage(to, "Auto Add off success")
elif text.lower() == 'autojoin on':
settings["autoJoin"] = True
cl.sendMessage(to, "Auto Join on success")
elif text.lower() == 'autojoin off':
settings["autoJoin"] = False
cl.sendMessage(to, "Auto Join off success")
elif text.lower() == 'autoleave on':
settings["autoLeave"] = True
cl.sendMessage(to, "Auto Leave on success")
                        elif text.lower() == 'autoleave off':
settings["autoLeave"] = False
cl.sendMessage(to, "Auto Leave off success")
elif text.lower() == 'autoread on':
settings["autoRead"] = True
cl.sendMessage(to, "Auto Read on success")
elif text.lower() == 'autoread off':
settings["autoRead"] = False
cl.sendMessage(to, "Auto Read off success")
elif text.lower() == 'checksticker on':
settings["checkSticker"] = True
cl.sendMessage(to, "Berhasil mengaktifkan Check Details Sticker")
elif text.lower() == 'checksticker off':
settings["checkSticker"] = False
cl.sendMessage(to, "Berhasil menonaktifkan Check Details Sticker")
elif text.lower() == 'detectmention on':
settings["datectMention"] = True
cl.sendMessage(to, "Berhasil mengaktifkan Detect Mention")
elif text.lower() == 'detectmention off':
settings["datectMention"] = False
cl.sendMessage(to, "Berhasil menonaktifkan Detect Mention")
elif text.lower() == 'reread on':
settings["reread"] = True
cl.sendMessage(to,"reread on success")
elif text.lower() == 'reread off':
settings["reread"] = False
cl.sendMessage(to,"reread off success")
elif text.lower() == 'protect on':
settings["protect"] = True
cl.sendMessage(to, "Protect on success")
elif text.lower() == 'protect off':
settings["protect"] = False
cl.sendMessage(to, "Protect off success")
elif text.lower() == 'share on':
wait["share"] = True
cl.sendMessage(to, "已開啟分享")
elif text.lower() == 'share off':
wait["share"] = False
cl.sendMessage(to, "已關閉分享")
#==============================================================================#
elif text.lower() == 'admin ':
MENTION =eval(msg.contentMetadata['MENTION'])
inkey =MENTION['MENTIONEES'][0]['M']
admin.append(str(inkey))
cl.sendMessage(to,"已新增權限")
elif text.lower() == 'demin ':
MENTION =eval(msg.contentMetadata['MENTION'])
inkey =MENTION['MENTIONEES'][0]['M']
admin.remove(str(inkey))
cl.sendMessage(to,"已停止權限")
elif text.lower() == 'adminlist':
if admin == []:
cl.sendMessage(to,"無擁有權限者!")
else:
mc = "╔══[ Admin List ]"
for mi_d in admin:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(to,mc + "\n╚══[ Finish ]")
#==============================================================================#
elif text.lower() == 'me':
sendMessageWithMention(to, clMID)
cl.sendContact(to, clMID)
elif text.lower() == 'mymid':
cl.sendMessage(msg.to,"[MID]\n" + clMID)
elif text.lower() == 'myname':
me = cl.getContact(clMID)
cl.sendMessage(msg.to,"[Name]\n" + me.displayName)
elif text.lower() == 'mytoken':
me = cl.getContact(clMID)
cl.sendMessage(msg.to,"[StatusMessage]\n" + me.statusMessage)
elif text.lower() == 'mypicture':
me = cl.getContact(clMID)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'myvideoprofile':
me = cl.getContact(clMID)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp")
elif text.lower() == 'mycover':
me = cl.getContact(clMID)
cover = cl.getProfileCoverURL(clMID)
cl.sendImageWithURL(msg.to, cover)
elif msg.text.lower().startswith("contact "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
mi_d = contact.mid
cl.sendContact(msg.to, mi_d)
elif msg.text.lower().startswith("mid "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
ret_ = "[ Mid User ]"
for ls in lists:
ret_ += "\n" + ls
cl.sendMessage(msg.to, str(ret_))
elif msg.text.lower().startswith("name "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
cl.sendMessage(msg.to, "[ 名字 ]\n" + contact.displayName)
for ls in lists:
contact = cl.getContact(ls)
cl.sendMessage(msg.to, "[ 個簽 ]\n" + contact.statusMessage)
for ls in lists:
path = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus
cl.sendImageWithURL(msg.to, str(path))
for ls in lists:
path = cl.getProfileCoverURL(ls)
pmath = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus
cl.sendImageWithURL(msg.to, path)
try:
key = eval(msg.contentMetadata["MENTION"])
u = key["MENTIONEES"][0]["M"]
cname = cl.getContact(u).displayName
cmid = cl.getContact(u).mid
cstatus = cl.getContact(u).statusMessage
cpic = cl.getContact(u).picturePath
cl.sendMessage(receiver, 'Nama : '+cname+'\nMID : '+cmid+'\nStatus Msg : '+cstatus+'\nPicture : http://dl.profile.line.naver.jp'+cpic)
cl.sendMessage(receiver, None, contentMetadata={'mid': cmid}, contentType=13)
if cl.getContact(u).videoProfile != None:
cl.sendVideoWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic+'/vp.small')
else:
cl.sendImageWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic)
except Exception as e:
cl.sendMessage(receiver, str(e))
                            if msg.contentMetadata is not None:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = cl.getProfileCoverURL(ls)
cl.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("cloneprofile "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
contact = mention["M"]
break
try:
cl.cloneContactProfile(contact)
cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah")
except:
cl.sendMessage(msg.to, "Gagal clone member")
elif text.lower() == 'restoreprofile':
try:
clProfile.displayName = str(myProfile["displayName"])
clProfile.statusMessage = str(myProfile["statusMessage"])
clProfile.pictureStatus = str(myProfile["pictureStatus"])
cl.updateProfileAttribute(8, clProfile.pictureStatus)
cl.updateProfile(clProfile)
cl.sendMessage(msg.to, "Berhasil restore profile tunggu beberapa saat sampai profile berubah")
except:
cl.sendMessage(msg.to, "Gagal restore profile")
#==============================================================================#
elif msg.text.lower().startswith("mimicadd "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
cl.sendMessage(msg.to,"已加入模仿名單!")
break
except:
cl.sendMessage(msg.to,"添加失敗 !")
break
elif msg.text.lower().startswith("mimicdel "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["模仿名單"]["target"][target]
cl.sendMessage(msg.to,"刪除成功 !")
break
except:
cl.sendMessage(msg.to,"刪除失敗 !")
break
elif text.lower() == 'mimiclist':
if settings["mimic"]["target"] == {}:
cl.sendMessage(msg.to,"未設定模仿目標")
else:
mc = "╔══[ Mimic List ]"
for mi_d in settings["mimic"]["target"]:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]")
elif "mimic" in msg.text.lower():
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
cl.sendMessage(msg.to,"Reply Message on")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
cl.sendMessage(msg.to,"Reply Message off")
#==============================================================================#
elif text.lower() == 'groupcreator':
group = cl.getGroup(to)
GS = group.creator.mid
cl.sendContact(to, GS)
elif text.lower() == 'groupid':
gid = cl.getGroup(to)
cl.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif text.lower() == 'grouppicture':
group = cl.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(to, path)
elif text.lower() == 'groupname':
gid = cl.getGroup(to)
cl.sendMessage(to, "[群組名稱 : ]\n" + gid.name)
elif text.lower() == 'grouplink':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = cl.reissueGroupTicket(to)
cl.sendMessage(to, "[ Group Ticket ]\nhttps://cl.me/R/ti/g/{}".format(str(ticket)))
else:
cl.sendMessage(to, "Grouplink未開啟 {}openlink".format(str(settings["keyCommand"])))
elif text.lower() == 'link off':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
cl.sendMessage(to, "群組網址已關")
else:
group.preventedJoinByTicket = False
cl.updateGroup(group)
cl.sendMessage(to, "關閉成功")
elif text.lower() == 'link on':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == True:
cl.sendMessage(to, "群組網址已開")
else:
group.preventedJoinByTicket = True
cl.updateGroup(group)
cl.sendMessage(to, "開啟成功")
elif text.lower() == 'groupinfo':
group = cl.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "不明"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "關閉"
gTicket = "無"
else:
gQr = "開啟"
gTicket = "https://cl.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ 群組名稱 : {}".format(str(group.name))
ret_ += "\n╠ 群組 Id : {}".format(group.id)
ret_ += "\n╠ 創建者 : {}".format(str(gCreator))
ret_ += "\n╠ 群組人數 : {}".format(str(len(group.members)))
ret_ += "\n╠ 邀請中 : {}".format(gPending)
ret_ += "\n╠ 網址狀態 : {}".format(gQr)
ret_ += "\n╠ 群組網址 : {}".format(gTicket)
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
cl.sendImageWithURL(to, path)
elif text.lower() == 'groupmemberlist':
if msg.toType == 2:
group = cl.getGroup(to)
ret_ = "╔══[ 成員名單 ]"
no = 0 + 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ 全部成員共 {} 人]".format(str(len(group.members)))
cl.sendMessage(to, str(ret_))
elif text.lower() == 'grouplist':
groups = cl.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = cl.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
cl.sendMessage(to, str(ret_))
elif msg.text.lower().startswith("nk "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.sendMessage(to,"Fuck you")
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendMessage(to,"Error")
elif msg.text.lower().startswith("ri "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.sendMessage(to,"來回機票一張ww")
cl.kickoutFromGroup(msg.to,[target])
cl.inviteIntoGroup(to,[target])
except:
cl.sendMessage(to,"Error")
elif text.lower() == 'nk':
if msg.toType == 2:
print ("[ 19 ] KICK ALL MEMBER")
_name = msg.text.replace("Byeall","")
gs = cl.getGroup(msg.to)
cl.sendMessage(msg.to,"Sorry guys")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendMessage(msg.to,"Not Found")
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendMessage(msg.to,"")
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendMessage(msg.to,"It can't be used besides the group.")
elif text.lower() == 'cancel':
if msg.toType == 2:
group = cl.getGroup(to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendMessage(msg.to,"已取消所有邀請!")
elif ("Inv " in msg.text):
if msg.toType == 2:
midd = msg.text.replace("Inv ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(to,[midd])
#==============================================================================#
elif text.lower() == 'tagall':
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//100
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*100 : (a+1)*100]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Alin \n'
cl.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
cl.sendMessage(to, "Total {} Mention".format(str(len(nama))))
elif text.lower() == 'sr':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read['readPoint']:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"偵測點已設置")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to, "Set reading point:\n" + readTime)
elif text.lower() == 'readcancel':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to not in read['readPoint']:
cl.sendMessage(msg.to,"偵測點已取消")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
cl.sendMessage(msg.to, "Delete reading point:\n" + readTime)
elif text.lower() == 'resetread':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read["readPoint"]:
try:
del read["readPoint"][msg.to]
del read["readMember"][msg.to]
del read["readTime"][msg.to]
except:
pass
cl.sendMessage(msg.to, "Reset reading point:\n" + readTime)
else:
cl.sendMessage(msg.to, "偵測點未設置?")
elif text.lower() == 'lr':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
if read["ROM"][receiver].items() == []:
cl.sendMessage(receiver,"[ 已讀的人 ]:\nNone")
else:
chiya = []
for rom in read["ROM"][receiver].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = '[ 已讀的人 ]:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@c\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "\n[ 已讀時間 ]: \n" + readTime
try:
cl.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as error:
print (error)
pass
else:
cl.sendMessage(receiver,"尚未設置偵測點")
#==============================================================================#
elif msg.text.lower().startswith("ban "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["blacklist"][target] = True
cl.sendMessage(msg.to,"已加入黑單!")
break
except:
cl.sendMessage(msg.to,"添加失敗 !")
break
elif msg.text.lower().startswith("unban "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["blacklist"][target]
cl.sendMessage(msg.to,"刪除成功 !")
break
except:
cl.sendMessage(msg.to,"刪除失敗 !")
break
elif text.lower() == 'banlist':
if settings["blacklist"] == {}:
cl.sendMessage(msg.to,"無黑單成員!")
else:
mc = "╔══[ Black List ]"
for mi_d in settings["blacklist"]:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]")
elif text.lower() == 'nkban':
if msg.toType == 2:
group = cl.getGroup(to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in settings["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendMessage(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
cl.sendMessage(msg.to,"Blacklist kicked out")
elif text.lower() == 'cleanban':
settings["blacklist"] == {ok}
for mi_d in settings["blacklist"]:
try:
del settings["blacklist"][mi_d]
cl.sendMessage(msg.to,"已清空黑單!")
break
except:
cl.sendMessage(msg.to,"刪除失敗 !")
break
elif text.lower() == 'banmidlist':
if settings["blacklist"] == {}:
cl.sendMessage(msg.to,"無黑單成員!")
else:
mc = "╔══[ Black List ]"
for mi_d in settings["blacklist"]:
mc += "\n╠ "+mi_d
cl.sendMessage(to,mc + "\n╚══[ Finish ]")
#==============================================================================#
elif "Copy " in msg.text:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendMessage(to, "Success...")
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
P = contact.pictureStatus
pic = cl.getProfile()
pic.pictureStatus = P
cl.updateProfilePicture(P)
cl.cloneContactProfile(target)
except Exception as e:
cl.sendMessage(to, "Failed!")
elif text.lower() == 'cc9487':
if sender in ['ua10c2ad470b4b6e972954e1140ad1891']:
python = sys.executable
os.execl(python, python, *sys.argv)
else:
pass
#==============================================================================#
elif text.lower() == 'calender':
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
cl.sendMessage(msg.to, readTime)
elif "screenshotwebsite" in msg.text.lower():
sep = text.split(" ")
query = text.replace(sep[0] + " ","")
with requests.session() as web:
r = web.get("http://rahandiapi.herokuapp.com/sswebAPI?key=betakey&link={}".format(urllib.parse.quote(query)))
data = r.text
data = json.loads(data)
cl.sendImageWithURL(to, data["result"])
elif "checkdate" in msg.text.lower():
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
ret_ = "╔══[ D A T E ]"
ret_ += "\n╠ Date Of Birth : {}".format(str(data["data"]["lahir"]))
ret_ += "\n╠ Age : {}".format(str(data["data"]["usia"]))
ret_ += "\n╠ Birthday : {}".format(str(data["data"]["ultah"]))
ret_ += "\n╠ Zodiak : {}".format(str(data["data"]["zodiak"]))
ret_ += "\n╚══[ Success ]"
cl.sendMessage(to, str(ret_))
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = "╔══[ Sticker Info ]"
ret_ += "\n╠ STICKER ID : {}".format(stk_id)
ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver)
ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
elif msg.contentType == 13:
if settings["copy"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
targets = []
for s in groups.members:
if _name in s.displayName:
print ("[Target] Copy")
break
else:
targets.append(copy)
if targets == []:
cl.sendMessage(msg.to, "Not Found...")
pass
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah")
settings['copy'] = False
break
except:
msg.contentMetadata = {'mid': target}
settings["copy"] = False
break
#==============================================================================#
if op.type == 26:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if settings["autoRead"] == True:
cl.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
cl.sendMessage(msg.to,text)
if msg.contentType == 0 and sender not in clMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if clMID in mention["M"]:
if settings["detectMention"] == True:
contact = cl.getContact(sender)
cl.sendMessage(to, "sundala nu")
sendMessageWithMention(to, contact.mid)
break
#==============================================================================#
if op.type == 65:
print ("[ 65 ] REREAD")
try:
at = op.param1
msg_id = op.param2
if setting["reread"] == True:
if msg_id in msg_dict:
if msg_dict[msg_id]["from"] not in bl:
cl.sendMessage(at,"[收回訊息者]\n%s\n[訊息內容]\n%s"%(cl.getContact(msg_dict[msg_id]["from"]).displayName,msg_dict[msg_id]["text"]))
del msg_dict[msg_id]
else:
pass
except Exception as e:
print (e)
#==============================================================================#
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
try:
if op.param1 in read['readPoint']:
if op.param2 in read['readMember'][op.param1]:
pass
else:
read['readMember'][op.param1] += op.param2
read['ROM'][op.param1][op.param2] = op.param2
backupData()
else:
pass
except:
pass
except Exception as error:
logError(error)
#==============================================================================#
while True:
try:
ops = oepoll.singleTrace(count=50)
if ops is not None:
for op in ops:
lineBot(op)
oepoll.setRevision(op.revision)
except Exception as e:
logError(e)
| [((9, 11, 9, 22), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((23, 11, 23, 47), 'codecs.open', 'codecs.open', ({(23, 23, 23, 34): '"""read.json"""', (23, 35, 23, 38): '"""r"""', (23, 39, 23, 46): '"""utf-8"""'}, {}), "('read.json', 'r', 'utf-8')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((24, 15, 24, 51), 'codecs.open', 'codecs.open', ({(24, 27, 24, 38): '"""temp.json"""', (24, 39, 24, 42): '"""r"""', (24, 43, 24, 50): '"""utf-8"""'}, {}), "('temp.json', 'r', 'utf-8')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((26, 7, 26, 26), 'json.load', 'json.load', ({(26, 17, 26, 25): 'readOpen'}, {}), '(readOpen)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((27, 11, 27, 34), 'json.load', 'json.load', ({(27, 21, 27, 33): 'settingsOpen'}, {}), '(settingsOpen)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((46, 4, 46, 39), 'os.execl', 'os.execl', ({(46, 13, 46, 19): 'python', (46, 21, 46, 27): 'python', (46, 29, 46, 38): '*sys.argv'}, {}), '(python, python, *sys.argv)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((61, 12, 61, 26), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((50, 12, 50, 48), 'codecs.open', 'codecs.open', ({(50, 24, 50, 35): '"""temp.json"""', (50, 36, 50, 39): '"""w"""', (50, 40, 50, 47): '"""utf-8"""'}, {}), "('temp.json', 'w', 'utf-8')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((51, 8, 51, 74), 'json.dump', 'json.dump', (), '', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((53, 12, 53, 48), 'codecs.open', 'codecs.open', ({(53, 24, 53, 35): '"""read.json"""', (53, 36, 53, 39): '"""w"""', (53, 40, 53, 47): '"""utf-8"""'}, {}), "('read.json', 'w', 'utf-8')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((54, 8, 54, 74), 'json.dump', 'json.dump', (), '', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((66, 37, 66, 52), 'json.dumps', 'json.dumps', ({(66, 48, 66, 51): 'mid'}, {}), '(mid)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((161, 28, 161, 39), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((958, 32, 958, 59), 're.findall', 're.findall', ({(958, 43, 958, 52): '"""@(\\\\w+)"""', (958, 54, 958, 58): 'text'}, {}), "('@(\\\\w+)', text)", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, 
pytz, urllib, urllib.parse\n'), ((959, 34, 959, 82), 'ast.literal_eval', 'ast.literal_eval', ({(959, 51, 959, 81): "msg.contentMetadata['MENTION']"}, {}), "(msg.contentMetadata['MENTION'])", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((163, 35, 163, 46), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((167, 20, 167, 33), 'time.sleep', 'time.sleep', ({(167, 31, 167, 32): '(5)'}, {}), '(5)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((171, 30, 171, 41), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((173, 30, 173, 54), 'humanfriendly.format_timespan', 'format_timespan', ({(173, 46, 173, 53): 'runtime'}, {}), '(runtime)', False, 'from humanfriendly import format_timespan, format_size, format_number, format_length\n'), ((314, 32, 314, 59), 're.findall', 're.findall', ({(314, 43, 314, 52): '"""@(\\\\w+)"""', (314, 54, 314, 58): 'text'}, {}), "('@(\\\\w+)', text)", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((315, 34, 315, 82), 'ast.literal_eval', 'ast.literal_eval', ({(315, 51, 315, 81): "msg.contentMetadata['MENTION']"}, {}), "(msg.contentMetadata['MENTION'])", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((327, 32, 327, 59), 're.findall', 're.findall', ({(327, 43, 327, 52): '"""@(\\\\w+)"""', (327, 54, 327, 58): 'text'}, {}), "('@(\\\\w+)', text)", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((328, 34, 328, 82), 'ast.literal_eval', 'ast.literal_eval', ({(328, 51, 328, 81): "msg.contentMetadata['MENTION']"}, {}), "(msg.contentMetadata['MENTION'])", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((340, 32, 340, 59), 're.findall', 're.findall', ({(340, 43, 340, 52): '"""@(\\\\w+)"""', (340, 54, 340, 58): 'text'}, {}), "('@(\\\\w+)', text)", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((341, 34, 341, 82), 'ast.literal_eval', 'ast.literal_eval', ({(341, 51, 341, 81): "msg.contentMetadata['MENTION']"}, {}), "(msg.contentMetadata['MENTION'])", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((379, 36, 379, 63), 're.findall', 're.findall', ({(379, 47, 379, 56): '"""@(\\\\w+)"""', (379, 58, 379, 62): 'text'}, {}), "('@(\\\\w+)', text)", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((380, 38, 380, 86), 'ast.literal_eval', 'ast.literal_eval', ({(380, 55, 380, 85): "msg.contentMetadata['MENTION']"}, {}), "(msg.contentMetadata['MENTION'])", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, 
six, ast, pytz, urllib, urllib.parse\n'), ((391, 32, 391, 59), 're.findall', 're.findall', ({(391, 43, 391, 52): '"""@(\\\\w+)"""', (391, 54, 391, 58): 'text'}, {}), "('@(\\\\w+)', text)", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((392, 34, 392, 82), 'ast.literal_eval', 'ast.literal_eval', ({(392, 51, 392, 81): "msg.contentMetadata['MENTION']"}, {}), "(msg.contentMetadata['MENTION'])", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((628, 25, 628, 54), 'pytz.timezone', 'pytz.timezone', ({(628, 39, 628, 53): '"""Asia/Jakarta"""'}, {}), "('Asia/Jakarta')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((629, 30, 629, 49), 'datetime.datetime.now', 'datetime.now', (), '', False, 'from datetime import datetime\n'), ((669, 25, 669, 54), 'pytz.timezone', 'pytz.timezone', ({(669, 39, 669, 53): '"""Asia/Jakarta"""'}, {}), "('Asia/Jakarta')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((670, 30, 670, 49), 'datetime.datetime.now', 'datetime.now', (), '', False, 'from datetime import datetime\n'), ((652, 32, 652, 77), 'json.dump', 'json.dump', (), '', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((666, 28, 666, 73), 'json.dump', 'json.dump', (), '', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((692, 25, 692, 54), 'pytz.timezone', 'pytz.timezone', ({(692, 39, 692, 53): '"""Asia/Jakarta"""'}, {}), "('Asia/Jakarta')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((693, 30, 693, 49), 'datetime.datetime.now', 'datetime.now', (), '', False, 'from datetime import datetime\n'), ((625, 82, 625, 110), 'json.dumps', 'json.dumps', ({(625, 93, 625, 109): "{'MENTIONEES': b}"}, {}), "({'MENTIONEES': b})", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((649, 55, 649, 69), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((663, 51, 663, 65), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((715, 25, 715, 54), 'pytz.timezone', 'pytz.timezone', ({(715, 39, 715, 53): '"""Asia/Jakarta"""'}, {}), "('Asia/Jakarta')", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((716, 30, 716, 49), 'datetime.datetime.now', 'datetime.now', (), '', False, 'from datetime import datetime\n'), ((855, 24, 855, 59), 'os.execl', 'os.execl', ({(855, 33, 855, 39): 'python', (855, 41, 855, 47): 'python', (855, 49, 855, 58): '*sys.argv'}, {}), '(python, python, *sys.argv)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((860, 25, 860, 55), 'pytz.timezone', 'pytz.timezone', ({(860, 39, 860, 54): '"""Asia/Makassar"""'}, {}), "('Asia/Makassar')", False, 'import time, 
random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((861, 30, 861, 49), 'datetime.datetime.now', 'datetime.now', (), '', False, 'from datetime import datetime\n'), ((750, 107, 750, 122), 'json.dumps', 'json.dumps', ({(750, 118, 750, 121): 'zx2'}, {}), '(zx2)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((876, 25, 876, 43), 'requests.session', 'requests.session', ({}, {}), '()', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((879, 31, 879, 47), 'json.loads', 'json.loads', ({(879, 42, 879, 46): 'data'}, {}), '(data)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((884, 22, 884, 167), 'requests.get', 'requests.get', ({(884, 35, 884, 166): "'https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal=' + tanggal"}, {}), "(\n 'https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='\n + tanggal)", False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((886, 25, 886, 41), 'json.loads', 'json.loads', ({(886, 36, 886, 40): 'data'}, {}), '(data)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n'), ((877, 106, 877, 131), 'urllib.parse.quote', 'urllib.parse.quote', ({(877, 125, 877, 130): 'query'}, {}), '(query)', False, 'import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse\n')] |
datadealer/dd_app | dd_app/messaging/backend.py | 3806b9b9df165a49f0fca8a249170b4ccd4d0177 | class RedisBackend(object):
def __init__(self, settings={}, *args, **kwargs):
self.settings = settings
@property
def connection(self):
# cached redis connection
if not hasattr(self, '_connection'):
self._connection = self.settings.get('redis.connector').get()
return self._connection
@property
def channel(self):
# Fanout channel
if not hasattr(self, '_channel'):
self._channel = self.connection.pubsub()
return self._channel
def subscribe(self, channels=[]):
# Fanout subscriber
for chan_id in channels:
self.channel.subscribe(chan_id)
def listen(self):
# Fanout generator
for m in self.channel.listen():
if m['type'] == 'message':
yield m
def send(self, channel_id, payload):
# Fanout emitter
return self.connection.publish(channel_id, payload)
def listen_queue(self, queue_keys):
# Message queue generator
while 1:
yield self.connection.blpop(queue_keys)
def send_queue(self, queue_key, payload):
        return self.connection.rpush(queue_key, payload)
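# Minimal usage sketch (assumes settings['redis.connector'].get() returns a configured
# redis-py client; `app_settings` and `handle` are illustrative names only):
#   backend = RedisBackend(settings=app_settings)
#   backend.subscribe(channels=['chan-1'])
#   for message in backend.listen():
#       handle(message['data'])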
| [] |
bitfag/bt-macd-binance | fetch_data.py | eeffe52f8e561ff521629839078ff886e7bf700e | #!/usr/bin/env python
from btmacd.binance_fetcher import BinanceFetcher
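# BinanceFetcher (btmacd.binance_fetcher) is expected to download OHLC candles for the
# given symbol from start_date onward and write them to the CSV named by `filename`.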
def main():
fetcher = BinanceFetcher("BTCUSDT", filename="binance_ohlc.csv", start_date="01.01.2018")
fetcher.fetch()
if __name__ == "__main__":
main()
| [((7, 14, 7, 93), 'btmacd.binance_fetcher.BinanceFetcher', 'BinanceFetcher', (), '', False, 'from btmacd.binance_fetcher import BinanceFetcher\n')] |
Frightera/probability | tensorflow_probability/python/mcmc/diagnostic.py | deac4562cbc1056e6abebc7450218d38444fe65d | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Markov Chain Monte Carlo (MCMC) sampling.
@@effective_sample_size
@@potential_scale_reduction
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import stats
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import nest_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'effective_sample_size',
'potential_scale_reduction',
]
def effective_sample_size(states,
filter_threshold=0.,
filter_beyond_lag=None,
filter_beyond_positive_pairs=False,
cross_chain_dims=None,
validate_args=False,
name=None):
"""Estimate a lower bound on effective sample size for each independent chain.
Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
with the same variance as `state`.
More precisely, given a stationary sequence of possibly correlated random
variables `X_1, X_2, ..., X_N`, identically distributed, ESS is the
number such that
```
Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.
```
If the sequence is uncorrelated, `ESS = N`. If the sequence is positively
auto-correlated, `ESS` will be less than `N`. If there are negative
correlations, then `ESS` can exceed `N`.
Some math shows that, with `R_k` the auto-correlation sequence,
`R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have
```
ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]
```
This function estimates the above by first estimating the auto-correlation.
Since `R_k` must be estimated using only `N - k` samples, it becomes
progressively noisier for larger `k`. For this reason, the summation over
`R_k` should be truncated at some number `filter_beyond_lag < N`. This
function provides two methods to perform this truncation.
* `filter_threshold` -- since many MCMC methods generate chains where `R_k >
0`, a reasonable criterion is to truncate at the first index where the
estimated auto-correlation becomes negative. This method does not estimate
the `ESS` of super-efficient chains (where `ESS > N`) correctly.
* `filter_beyond_positive_pairs` -- reversible MCMC chains produce
an auto-correlation sequence with the property that pairwise sums of the
elements of that sequence are positive [Geyer][1], i.e.
`R_{2k} + R_{2k + 1} > 0` for `k in {0, ..., N/2}`. Deviations are only
possible due to noise. This method truncates the auto-correlation sequence
where the pairwise sums become non-positive.
The arguments `filter_beyond_lag`, `filter_threshold` and
`filter_beyond_positive_pairs` are filters intended to remove noisy tail terms
from `R_k`. You can combine `filter_beyond_lag` with `filter_threshold` or
  `filter_beyond_positive_pairs`. E.g., combining `filter_beyond_lag` and
`filter_beyond_positive_pairs` means that terms are removed if they were to be
filtered under the `filter_beyond_lag` OR `filter_beyond_positive_pairs`
criteria.
This function can also compute cross-chain ESS following
[Vehtari et al. (2019)][2] by specifying the `cross_chain_dims` argument.
Cross-chain ESS takes into account the cross-chain variance to reduce the ESS
in cases where the chains are not mixing well. In general, this will be a
smaller number than computing the ESS for individual chains and then summing
them. In an extreme case where the chains have fallen into K non-mixing modes,
this function will return ESS ~ K. Even when chains are mixing well it is
  still preferable to compute cross-chain ESS via this method because it will
reduce the noise in the estimate of `R_k`, reducing the need for truncation.
Args:
states: `Tensor` or Python structure of `Tensor` objects. Dimension zero
should index identically distributed states.
filter_threshold: `Tensor` or Python structure of `Tensor` objects. Must
broadcast with `state`. The sequence of auto-correlations is truncated
after the first appearance of a term less than `filter_threshold`.
Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,
setting to any number less than `-1` has the same effect. Ignored if
`filter_beyond_positive_pairs` is `True`.
filter_beyond_lag: `Tensor` or Python structure of `Tensor` objects. Must
be `int`-like and scalar valued. The sequence of auto-correlations is
truncated to this length. Setting to `None` means we do not filter based
on the size of lags.
filter_beyond_positive_pairs: Python boolean. If `True`, only consider the
initial auto-correlation sequence where the pairwise sums are positive.
cross_chain_dims: An integer `Tensor` or a structure of integer `Tensors`
corresponding to each state component. If a list of `states` is provided,
then this argument should also be a list of the same length. Which
dimensions of `states` to treat as independent chains that ESS will be
summed over. If `None`, no summation is performed. Note this requires at
least 2 chains.
validate_args: Whether to add runtime checks of argument validity. If False,
and arguments are incorrect, correct behavior is not guaranteed.
name: `String` name to prepend to created ops.
Returns:
ess: `Tensor` structure parallel to `states`. The effective sample size of
each component of `states`. If `cross_chain_dims` is None, the shape will
be `states.shape[1:]`. Otherwise, the shape is `tf.reduce_mean(states,
cross_chain_dims).shape[1:]`.
Raises:
ValueError: If `states` and `filter_threshold` or `states` and
`filter_beyond_lag` are both structures of different shapes.
ValueError: If `cross_chain_dims` is not `None` and there are less than 2
chains.
#### Examples
We use ESS to estimate standard error.
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 1000 states from one chain.
states = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=tf.constant([0., 0.]),
trace_fn=None,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
states.shape
==> (1000, 2)
ess = effective_sample_size(states, filter_beyond_positive_pairs=True)
==> Shape (2,) Tensor
  mean, variance = tf.nn.moments(states, axes=[0])
standard_error = tf.sqrt(variance / ess)
```
#### References
[1]: Charles J. Geyer, Practical Markov chain Monte Carlo (with discussion).
Statistical Science, 7:473-511, 1992.
[2]: Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, Paul-Christian
Burkner. Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC, 2019. Retrieved from
http://arxiv.org/abs/1903.08008
"""
if cross_chain_dims is None:
cross_chain_dims = nest_util.broadcast_structure(states, None)
filter_beyond_lag = nest_util.broadcast_structure(states, filter_beyond_lag)
filter_threshold = nest_util.broadcast_structure(states, filter_threshold)
filter_beyond_positive_pairs = nest_util.broadcast_structure(
states, filter_beyond_positive_pairs)
# Process items, one at a time.
def single_state(*args):
return _effective_sample_size_single_state(
*args, validate_args=validate_args)
with tf.name_scope('effective_sample_size' if name is None else name):
return nest.map_structure_up_to(
states,
single_state,
states, filter_beyond_lag, filter_threshold,
filter_beyond_positive_pairs, cross_chain_dims)
def _effective_sample_size_single_state(states, filter_beyond_lag,
filter_threshold,
filter_beyond_positive_pairs,
cross_chain_dims,
validate_args):
"""ESS computation for one single Tensor argument."""
with tf.name_scope('effective_sample_size_single_state'):
states = tf.convert_to_tensor(states, name='states')
dt = states.dtype
# filter_beyond_lag == None ==> auto_corr is the full sequence.
auto_cov = stats.auto_correlation(
states, axis=0, max_lags=filter_beyond_lag, normalize=False)
n = _axis_size(states, axis=0)
if cross_chain_dims is not None:
num_chains = _axis_size(states, cross_chain_dims)
num_chains_ = tf.get_static_value(num_chains)
assertions = []
msg = ('When `cross_chain_dims` is not `None`, there must be > 1 chain '
'in `states`.')
if num_chains_ is not None:
if num_chains_ < 2:
raise ValueError(msg)
elif validate_args:
assertions.append(
assert_util.assert_greater(num_chains, 1., message=msg))
with tf.control_dependencies(assertions):
# We're computing the R[k] from equation 10 of Vehtari et al.
# (2019):
#
# R[k] := 1 - (W - 1/C * Sum_{c=1}^C s_c**2 R[k, c]) / (var^+),
#
# where:
# C := number of chains
# N := length of chains
# x_hat[c] := 1 / N Sum_{n=1}^N x[n, c], chain mean.
# x_hat := 1 / C Sum_{c=1}^C x_hat[c], overall mean.
# W := 1/C Sum_{c=1}^C s_c**2, within-chain variance.
# B := N / (C - 1) Sum_{c=1}^C (x_hat[c] - x_hat)**2, between chain
# variance.
# s_c**2 := 1 / (N - 1) Sum_{n=1}^N (x[n, c] - x_hat[c])**2, chain
# variance
# R[k, m] := auto_corr[k, m, ...], auto-correlation indexed by chain.
# var^+ := (N - 1) / N * W + B / N
cross_chain_dims = ps.non_negative_axis(
cross_chain_dims, ps.rank(states))
# B / N
between_chain_variance_div_n = _reduce_variance(
tf.reduce_mean(states, axis=0),
biased=False, # This makes the denominator be C - 1.
axis=cross_chain_dims - 1)
# W * (N - 1) / N
biased_within_chain_variance = tf.reduce_mean(auto_cov[0],
cross_chain_dims - 1)
# var^+
approx_variance = (
biased_within_chain_variance + between_chain_variance_div_n)
# 1/C * Sum_{c=1}^C s_c**2 R[k, c]
mean_auto_cov = tf.reduce_mean(auto_cov, cross_chain_dims)
auto_corr = 1. - (biased_within_chain_variance -
mean_auto_cov) / approx_variance
else:
auto_corr = auto_cov / auto_cov[:1]
num_chains = 1
# With R[k] := auto_corr[k, ...],
# ESS = N / {1 + 2 * Sum_{k=1}^N R[k] * (N - k) / N}
# = N / {-1 + 2 * Sum_{k=0}^N R[k] * (N - k) / N} (since R[0] = 1)
# approx N / {-1 + 2 * Sum_{k=0}^M R[k] * (N - k) / N}
# where M is the filter_beyond_lag truncation point chosen above.
# Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total
# ndims the same as auto_corr
k = tf.range(0., _axis_size(auto_corr, axis=0))
nk_factor = (n - k) / n
if tensorshape_util.rank(auto_corr.shape) is not None:
new_shape = [-1] + [1] * (tensorshape_util.rank(auto_corr.shape) - 1)
else:
new_shape = tf.concat(
([-1],
tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)),
axis=0)
nk_factor = tf.reshape(nk_factor, new_shape)
weighted_auto_corr = nk_factor * auto_corr
if filter_beyond_positive_pairs:
def _sum_pairs(x):
x_len = ps.shape(x)[0]
# For odd sequences, we drop the final value.
x = x[:x_len - x_len % 2]
new_shape = ps.concat([[x_len // 2, 2], ps.shape(x)[1:]], axis=0)
return tf.reduce_sum(tf.reshape(x, new_shape), 1)
# Pairwise sums are all positive for auto-correlation spectra derived from
# reversible MCMC chains.
# E.g. imagine the pairwise sums are [0.2, 0.1, -0.1, -0.2]
# Step 1: mask = [False, False, True, True]
mask = _sum_pairs(auto_corr) < 0.
# Step 2: mask = [0, 0, 1, 1]
mask = tf.cast(mask, dt)
# Step 3: mask = [0, 0, 1, 2]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
# N.B. this reduces the length of weighted_auto_corr by a factor of 2.
# It still works fine in the formula below.
weighted_auto_corr = _sum_pairs(weighted_auto_corr) * mask
elif filter_threshold is not None:
filter_threshold = tf.convert_to_tensor(
filter_threshold, dtype=dt, name='filter_threshold')
# Get a binary mask to zero out values of auto_corr below the threshold.
# mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,
# mask[i, ...] = 0, otherwise.
# So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]
# Building step by step,
# Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.
# Step 1: mask = [False, False, True, False]
mask = auto_corr < filter_threshold
# Step 2: mask = [0, 0, 1, 0]
mask = tf.cast(mask, dtype=dt)
# Step 3: mask = [0, 0, 1, 1]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
weighted_auto_corr *= mask
return num_chains * n / (-1 + 2 * tf.reduce_sum(weighted_auto_corr, axis=0))
def potential_scale_reduction(chains_states,
independent_chain_ndims=1,
split_chains=False,
validate_args=False,
name=None):
"""Gelman and Rubin (1992)'s potential scale reduction for chain convergence.
Given `N > 1` states from each of `C > 1` independent chains, the potential
scale reduction factor, commonly referred to as R-hat, measures convergence of
the chains (to the same target) by testing for equality of means.
Specifically, R-hat measures the degree to which variance (of the means)
between chains exceeds what one would expect if the chains were identically
distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2].
Some guidelines:
* The initial state of the chains should be drawn from a distribution
overdispersed with respect to the target.
* If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.
Before that, R-hat > 1 (except in pathological cases, e.g. if the chain
paths were identical).
  * The above holds for any number of chains `C > 1`. Increasing `C` improves
    the effectiveness of the diagnostic.
* Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of
course this is problem-dependent. See [Brooks and Gelman (1998)][2].
* R-hat only measures non-convergence of the mean. If higher moments, or
other statistics are desired, a different diagnostic should be used. See
[Brooks and Gelman (1998)][2].
Args:
chains_states: `Tensor` or Python structure of `Tensor`s representing the
states of a Markov Chain at each result step. The `ith` state is
assumed to have shape `[Ni, Ci1, Ci2,...,CiD] + A`.
Dimension `0` indexes the `Ni > 1` result steps of the Markov Chain.
Dimensions `1` through `D` index the `Ci1 x ... x CiD` independent
chains to be tested for convergence to the same target.
The remaining dimensions, `A`, can have any shape (even empty).
independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the
number of dimensions, from `dim = 1` to `dim = D`, holding independent
chain results to be tested for convergence.
split_chains: Python `bool`. If `True`, divide samples from each chain into
first and second halves, treating these as separate chains. This makes
R-hat more robust to non-stationary chains, and is recommended in [3].
validate_args: Whether to add runtime checks of argument validity. If False,
and arguments are incorrect, correct behavior is not guaranteed.
    name: `String` name to prepend to created ops. Default:
`potential_scale_reduction`.
Returns:
`Tensor` structure parallel to `chains_states` representing the
    R-hat statistic for the state(s). Same `dtype` as `chains_states`, and
    shape equal to `chains_states.shape[1 + independent_chain_ndims:]`.
Raises:
ValueError: If `independent_chain_ndims < 1`.
#### Examples
Diagnosing convergence by monitoring 10 chains that each attempt to
sample from a 2-variate normal.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 10 (2x) overdispersed initial states.
initial_state = target.sample(10) * 2.
  initial_state.shape
  ==> (10, 2)
# Get 1000 samples from the 10 independent chains.
chains_states = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=initial_state,
trace_fn=None,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
chains_states.shape
==> (1000, 10, 2)
rhat = tfp.mcmc.diagnostic.potential_scale_reduction(
chains_states, independent_chain_ndims=1)
# The second dimension needed a longer burn-in.
  rhat
==> [1.05, 1.3]
```
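  A sketch (not part of the original docstring) of the `split_chains` variant,
  which splits each chain into first and second halves and treats the halves
  as separate chains, making the diagnostic more robust to non-stationarity:

  ```python
  split_rhat = tfp.mcmc.potential_scale_reduction(
      chains_states, independent_chain_ndims=1, split_chains=True)
  # Same shape as `rhat` above, i.e. (2,), now computed from 20 half-chains.
  ```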
To see why R-hat is reasonable, let `X` be a random variable drawn uniformly
from the combined states (combined over all chains). Then, in the limit
`N, C --> infinity`, with `E`, `Var` denoting expectation and variance,
```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```
Using the law of total variance, the numerator is the variance of the combined
  states, and the denominator is the total variance minus the variance of the
  individual chain means. If the chains are all drawing from the same
distribution, they will have the same mean, and thus the ratio should be one.
#### References
  [1]: Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation
       Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992.
  [2]: Stephen P. Brooks and Andrew Gelman. General Methods for Monitoring
       Convergence of Iterative Simulations. _Journal of Computational and
       Graphical Statistics_, 7(4), 1998.
[3]: Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, Paul-Christian
Burkner. Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC, 2019. Retrieved from
http://arxiv.org/abs/1903.08008
"""
# tf.get_static_value returns None iff a constant value (as a numpy
# array) is not efficiently computable. Therefore, we try constant_value then
# check for None.
icn_const_ = tf.get_static_value(
ps.convert_to_shape_tensor(independent_chain_ndims))
if icn_const_ is not None:
independent_chain_ndims = icn_const_
if icn_const_ < 1:
raise ValueError(
'Argument `independent_chain_ndims` must be `>= 1`, found: {}'.format(
independent_chain_ndims))
def single_state(s):
return _potential_scale_reduction_single_state(
s, independent_chain_ndims, split_chains, validate_args)
with tf.name_scope('potential_scale_reduction' if name is None else name):
return tf.nest.map_structure(single_state, chains_states)
def _potential_scale_reduction_single_state(state, independent_chain_ndims,
split_chains, validate_args):
"""potential_scale_reduction for one single state `Tensor`."""
# casting integers to floats for floating-point division
# check to see if the `state` is a numpy object for the numpy test suite
if dtype_util.as_numpy_dtype(state.dtype) is np.int64:
state = tf.cast(state, tf.float64)
elif dtype_util.is_integer(state.dtype):
state = tf.cast(state, tf.float32)
with tf.name_scope('potential_scale_reduction_single_state'):
# We assume exactly one leading dimension indexes e.g. correlated samples
# from each Markov chain.
state = tf.convert_to_tensor(state, name='state')
n_samples_ = tf.compat.dimension_value(state.shape[0])
if n_samples_ is not None: # If available statically.
if split_chains and n_samples_ < 4:
raise ValueError(
'Must provide at least 4 samples when splitting chains. '
'Found {}'.format(n_samples_))
if not split_chains and n_samples_ < 2:
raise ValueError(
'Must provide at least 2 samples. Found {}'.format(n_samples_))
elif validate_args:
if split_chains:
assertions = [assert_util.assert_greater(
ps.shape(state)[0], 4,
message='Must provide at least 4 samples when splitting chains.')]
with tf.control_dependencies(assertions):
state = tf.identity(state)
else:
assertions = [assert_util.assert_greater(
ps.shape(state)[0], 2,
message='Must provide at least 2 samples.')]
with tf.control_dependencies(assertions):
state = tf.identity(state)
# Define so it's not a magic number.
# Warning! `if split_chains` logic assumes this is 1!
sample_ndims = 1
if split_chains:
# Split the sample dimension in half, doubling the number of
# independent chains.
# For odd number of samples, keep all but the last sample.
state_shape = ps.shape(state)
n_samples = state_shape[0]
state = state[:n_samples - n_samples % 2]
# Suppose state = [0, 1, 2, 3, 4, 5]
# Step 1: reshape into [[0, 1, 2], [3, 4, 5]]
# E.g. reshape states of shape [a, b] into [2, a//2, b].
state = tf.reshape(
state,
ps.concat([[2, n_samples // 2], state_shape[1:]], axis=0)
)
# Step 2: Put the size `2` dimension in the right place to be treated as a
# chain, changing [[0, 1, 2], [3, 4, 5]] into [[0, 3], [1, 4], [2, 5]],
# reshaping [2, a//2, b] into [a//2, 2, b].
state = tf.transpose(
a=state,
perm=ps.concat(
[[1, 0], ps.range(2, ps.rank(state))], axis=0))
# We're treating the new dim as indexing 2 chains, so increment.
independent_chain_ndims += 1
sample_axis = ps.range(0, sample_ndims)
chain_axis = ps.range(sample_ndims,
sample_ndims + independent_chain_ndims)
sample_and_chain_axis = ps.range(
0, sample_ndims + independent_chain_ndims)
n = _axis_size(state, sample_axis)
m = _axis_size(state, chain_axis)
# In the language of Brooks and Gelman (1998),
# B / n is the between chain variance, the variance of the chain means.
# W is the within sequence variance, the mean of the chain variances.
b_div_n = _reduce_variance(
tf.reduce_mean(state, axis=sample_axis, keepdims=True),
sample_and_chain_axis,
biased=False)
w = tf.reduce_mean(
_reduce_variance(state, sample_axis, keepdims=True, biased=False),
axis=sample_and_chain_axis)
# sigma^2_+ is an estimate of the true variance, which would be unbiased if
    #   each chain were drawn from the target; cf. the "law of total variance."
sigma_2_plus = ((n - 1) / n) * w + b_div_n
return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
# TODO(b/72873233) Move some variant of this to tfd.sample_stats.
def _reduce_variance(x, axis=None, biased=True, keepdims=False):
with tf.name_scope('reduce_variance'):
x = tf.convert_to_tensor(x, name='x')
mean = tf.reduce_mean(x, axis=axis, keepdims=True)
biased_var = tf.reduce_mean(
tf.math.squared_difference(x, mean), axis=axis, keepdims=keepdims)
if biased:
return biased_var
n = _axis_size(x, axis)
return (n / (n - 1.)) * biased_var
def _axis_size(x, axis=None):
"""Get number of elements of `x` in `axis`, as type `x.dtype`."""
if axis is None:
return ps.cast(ps.size(x), x.dtype)
return ps.cast(
ps.reduce_prod(
ps.gather(ps.shape(x), axis)), x.dtype)
| [((189, 22, 189, 78), 'tensorflow_probability.python.internal.nest_util.broadcast_structure', 'nest_util.broadcast_structure', ({(189, 52, 189, 58): 'states', (189, 60, 189, 77): 'filter_beyond_lag'}, {}), '(states, filter_beyond_lag)', False, 'from tensorflow_probability.python.internal import nest_util\n'), ((190, 21, 190, 76), 'tensorflow_probability.python.internal.nest_util.broadcast_structure', 'nest_util.broadcast_structure', ({(190, 51, 190, 57): 'states', (190, 59, 190, 75): 'filter_threshold'}, {}), '(states, filter_threshold)', False, 'from tensorflow_probability.python.internal import nest_util\n'), ((191, 33, 192, 43), 'tensorflow_probability.python.internal.nest_util.broadcast_structure', 'nest_util.broadcast_structure', ({(192, 6, 192, 12): 'states', (192, 14, 192, 42): 'filter_beyond_positive_pairs'}, {}), '(states, filter_beyond_positive_pairs)', False, 'from tensorflow_probability.python.internal import nest_util\n'), ((188, 23, 188, 66), 'tensorflow_probability.python.internal.nest_util.broadcast_structure', 'nest_util.broadcast_structure', ({(188, 53, 188, 59): 'states', (188, 61, 188, 65): 'None'}, {}), '(states, None)', False, 'from tensorflow_probability.python.internal import nest_util\n'), ((198, 7, 198, 71), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(198, 21, 198, 70): "('effective_sample_size' if name is None else name)"}, {}), "('effective_sample_size' if name is None else name)", True, 'import tensorflow.compat.v2 as tf\n'), ((199, 11, 203, 55), 'tensorflow.python.util.nest.map_structure_up_to', 'nest.map_structure_up_to', ({(200, 8, 200, 14): 'states', (201, 8, 201, 20): 'single_state', (202, 8, 202, 14): 'states', (202, 16, 202, 33): 'filter_beyond_lag', (202, 35, 202, 51): 'filter_threshold', (203, 8, 203, 36): 'filter_beyond_positive_pairs', (203, 38, 203, 54): 'cross_chain_dims'}, {}), '(states, single_state, states, filter_beyond_lag,\n filter_threshold, filter_beyond_positive_pairs, cross_chain_dims)', False, 'from tensorflow.python.util import nest\n'), ((213, 7, 213, 58), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(213, 21, 213, 57): '"""effective_sample_size_single_state"""'}, {}), "('effective_sample_size_single_state')", True, 'import tensorflow.compat.v2 as tf\n'), ((215, 13, 215, 56), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((219, 15, 220, 68), 'tensorflow_probability.python.stats.auto_correlation', 'stats.auto_correlation', (), '', False, 'from tensorflow_probability.python import stats\n'), ((294, 16, 294, 48), 'tensorflow.compat.v2.reshape', 'tf.reshape', ({(294, 27, 294, 36): 'nk_factor', (294, 38, 294, 47): 'new_shape'}, {}), '(nk_factor, new_shape)', True, 'import tensorflow.compat.v2 as tf\n'), ((464, 6, 464, 57), 'tensorflow_probability.python.internal.prefer_static.convert_to_shape_tensor', 'ps.convert_to_shape_tensor', ({(464, 33, 464, 56): 'independent_chain_ndims'}, {}), '(independent_chain_ndims)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((475, 7, 475, 75), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(475, 21, 475, 74): "('potential_scale_reduction' if name is None else name)"}, {}), "('potential_scale_reduction' if name is None else name)", True, 'import tensorflow.compat.v2 as tf\n'), ((476, 11, 476, 61), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', ({(476, 33, 476, 45): 'single_state', (476, 47, 476, 60): 'chains_states'}, {}), '(single_state, 
chains_states)', True, 'import tensorflow.compat.v2 as tf\n'), ((484, 5, 484, 43), 'tensorflow_probability.python.internal.dtype_util.as_numpy_dtype', 'dtype_util.as_numpy_dtype', ({(484, 31, 484, 42): 'state.dtype'}, {}), '(state.dtype)', False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((485, 12, 485, 38), 'tensorflow.compat.v2.cast', 'tf.cast', ({(485, 20, 485, 25): 'state', (485, 27, 485, 37): 'tf.float64'}, {}), '(state, tf.float64)', True, 'import tensorflow.compat.v2 as tf\n'), ((486, 7, 486, 41), 'tensorflow_probability.python.internal.dtype_util.is_integer', 'dtype_util.is_integer', ({(486, 29, 486, 40): 'state.dtype'}, {}), '(state.dtype)', False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((488, 7, 488, 62), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(488, 21, 488, 61): '"""potential_scale_reduction_single_state"""'}, {}), "('potential_scale_reduction_single_state')", True, 'import tensorflow.compat.v2 as tf\n'), ((491, 12, 491, 53), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((493, 17, 493, 58), 'tensorflow.compat.v2.compat.dimension_value', 'tf.compat.dimension_value', ({(493, 43, 493, 57): 'state.shape[0]'}, {}), '(state.shape[0])', True, 'import tensorflow.compat.v2 as tf\n'), ((547, 18, 547, 43), 'tensorflow_probability.python.internal.prefer_static.range', 'ps.range', ({(547, 27, 547, 28): '0', (547, 30, 547, 42): 'sample_ndims'}, {}), '(0, sample_ndims)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((548, 17, 549, 65), 'tensorflow_probability.python.internal.prefer_static.range', 'ps.range', ({(548, 26, 548, 38): 'sample_ndims', (549, 26, 549, 64): 'sample_ndims + independent_chain_ndims'}, {}), '(sample_ndims, sample_ndims + independent_chain_ndims)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((550, 28, 551, 50), 'tensorflow_probability.python.internal.prefer_static.range', 'ps.range', ({(551, 8, 551, 9): '0', (551, 11, 551, 49): 'sample_ndims + independent_chain_ndims'}, {}), '(0, sample_ndims + independent_chain_ndims)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((575, 7, 575, 39), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(575, 21, 575, 38): '"""reduce_variance"""'}, {}), "('reduce_variance')", True, 'import tensorflow.compat.v2 as tf\n'), ((576, 8, 576, 41), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((577, 11, 577, 54), 'tensorflow.compat.v2.reduce_mean', 'tf.reduce_mean', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((225, 20, 225, 51), 'tensorflow.compat.v2.get_static_value', 'tf.get_static_value', ({(225, 40, 225, 50): 'num_chains'}, {}), '(num_chains)', True, 'import tensorflow.compat.v2 as tf\n'), ((287, 7, 287, 45), 'tensorflow_probability.python.internal.tensorshape_util.rank', 'tensorshape_util.rank', ({(287, 29, 287, 44): 'auto_corr.shape'}, {}), '(auto_corr.shape)', False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((311, 13, 311, 30), 'tensorflow.compat.v2.cast', 'tf.cast', ({(311, 21, 311, 25): 'mask', (311, 27, 311, 29): 'dt'}, {}), '(mask, dt)', True, 'import tensorflow.compat.v2 as tf\n'), ((313, 13, 313, 36), 'tensorflow.compat.v2.cumsum', 'tf.cumsum', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((315, 13, 315, 38), 'tensorflow.compat.v2.maximum', 
'tf.maximum', ({(315, 24, 315, 33): '1.0 - mask', (315, 35, 315, 37): '0.0'}, {}), '(1.0 - mask, 0.0)', True, 'import tensorflow.compat.v2 as tf\n'), ((487, 12, 487, 38), 'tensorflow.compat.v2.cast', 'tf.cast', ({(487, 20, 487, 25): 'state', (487, 27, 487, 37): 'tf.float32'}, {}), '(state, tf.float32)', True, 'import tensorflow.compat.v2 as tf\n'), ((525, 20, 525, 35), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(525, 29, 525, 34): 'state'}, {}), '(state)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((560, 8, 560, 62), 'tensorflow.compat.v2.reduce_mean', 'tf.reduce_mean', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((579, 8, 579, 43), 'tensorflow.compat.v2.math.squared_difference', 'tf.math.squared_difference', ({(579, 35, 579, 36): 'x', (579, 38, 579, 42): 'mean'}, {}), '(x, mean)', True, 'import tensorflow.compat.v2 as tf\n'), ((589, 19, 589, 29), 'tensorflow_probability.python.internal.prefer_static.size', 'ps.size', ({(589, 27, 589, 28): 'x'}, {}), '(x)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((237, 11, 237, 46), 'tensorflow.compat.v2.control_dependencies', 'tf.control_dependencies', ({(237, 35, 237, 45): 'assertions'}, {}), '(assertions)', True, 'import tensorflow.compat.v2 as tf\n'), ((264, 39, 265, 75), 'tensorflow.compat.v2.reduce_mean', 'tf.reduce_mean', ({(264, 54, 264, 65): 'auto_cov[0]', (265, 54, 265, 74): 'cross_chain_dims - 1'}, {}), '(auto_cov[0], cross_chain_dims - 1)', True, 'import tensorflow.compat.v2 as tf\n'), ((270, 24, 270, 66), 'tensorflow.compat.v2.reduce_mean', 'tf.reduce_mean', ({(270, 39, 270, 47): 'auto_cov', (270, 49, 270, 65): 'cross_chain_dims'}, {}), '(auto_cov, cross_chain_dims)', True, 'import tensorflow.compat.v2 as tf\n'), ((321, 25, 322, 62), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((332, 13, 332, 36), 'tensorflow.compat.v2.cast', 'tf.cast', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((334, 13, 334, 36), 'tensorflow.compat.v2.cumsum', 'tf.cumsum', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((336, 13, 336, 38), 'tensorflow.compat.v2.maximum', 'tf.maximum', ({(336, 24, 336, 33): '1.0 - mask', (336, 35, 336, 37): '0.0'}, {}), '(1.0 - mask, 0.0)', True, 'import tensorflow.compat.v2 as tf\n'), ((534, 10, 534, 67), 'tensorflow_probability.python.internal.prefer_static.concat', 'ps.concat', (), '', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((592, 20, 592, 31), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(592, 29, 592, 30): 'x'}, {}), '(x)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((257, 30, 257, 45), 'tensorflow_probability.python.internal.prefer_static.rank', 'ps.rank', ({(257, 38, 257, 44): 'states'}, {}), '(states)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((260, 12, 260, 42), 'tensorflow.compat.v2.reduce_mean', 'tf.reduce_mean', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((299, 16, 299, 27), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(299, 25, 299, 26): 'x'}, {}), '(x)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((303, 29, 303, 53), 'tensorflow.compat.v2.reshape', 'tf.reshape', ({(303, 40, 303, 41): 'x', (303, 43, 303, 52): 'new_shape'}, {}), '(x, new_shape)', True, 'import 
tensorflow.compat.v2 as tf\n'), ((339, 38, 339, 79), 'tensorflow.compat.v2.reduce_sum', 'tf.reduce_sum', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((235, 12, 235, 67), 'tensorflow_probability.python.internal.assert_util.assert_greater', 'assert_util.assert_greater', (), '', False, 'from tensorflow_probability.python.internal import assert_util\n'), ((288, 32, 288, 70), 'tensorflow_probability.python.internal.tensorshape_util.rank', 'tensorshape_util.rank', ({(288, 54, 288, 69): 'auto_corr.shape'}, {}), '(auto_corr.shape)', False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((507, 13, 507, 48), 'tensorflow.compat.v2.control_dependencies', 'tf.control_dependencies', ({(507, 37, 507, 47): 'assertions'}, {}), '(assertions)', True, 'import tensorflow.compat.v2 as tf\n'), ((508, 18, 508, 36), 'tensorflow.compat.v2.identity', 'tf.identity', ({(508, 30, 508, 35): 'state'}, {}), '(state)', True, 'import tensorflow.compat.v2 as tf\n'), ((513, 13, 513, 48), 'tensorflow.compat.v2.control_dependencies', 'tf.control_dependencies', ({(513, 37, 513, 47): 'assertions'}, {}), '(assertions)', True, 'import tensorflow.compat.v2 as tf\n'), ((514, 18, 514, 36), 'tensorflow.compat.v2.identity', 'tf.identity', ({(514, 30, 514, 35): 'state'}, {}), '(state)', True, 'import tensorflow.compat.v2 as tf\n'), ((302, 48, 302, 59), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(302, 57, 302, 58): 'x'}, {}), '(x)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((292, 20, 292, 38), 'tensorflow.compat.v2.rank', 'tf.rank', ({(292, 28, 292, 37): 'auto_corr'}, {}), '(auto_corr)', True, 'import tensorflow.compat.v2 as tf\n'), ((505, 12, 505, 27), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(505, 21, 505, 26): 'state'}, {}), '(state)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((511, 12, 511, 27), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(511, 21, 511, 26): 'state'}, {}), '(state)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((542, 35, 542, 49), 'tensorflow_probability.python.internal.prefer_static.rank', 'ps.rank', ({(542, 43, 542, 48): 'state'}, {}), '(state)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n')] |
jiangyuang/ModelPruningLibrary | mpl/models/leaf.py | 9c8ba5a3c5d118f37768d5d42254711f48d88745 | from torch import nn as nn
from .base_model import BaseModel
from ..nn.conv2d import DenseConv2d
from ..nn.linear import DenseLinear
__all__ = ["Conv2", "conv2", "Conv4", "conv4"]
class Conv2(BaseModel):
def __init__(self):
super(Conv2, self).__init__()
self.features = nn.Sequential(DenseConv2d(1, 32, kernel_size=5, padding=2), # 32x28x28
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2), # 32x14x14
DenseConv2d(32, 64, kernel_size=5, padding=2), # 64x14x14
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2)) # 64x7x7
self.classifier = nn.Sequential(DenseLinear(64 * 7 * 7, 2048),
nn.ReLU(inplace=True),
DenseLinear(2048, 62))
self.collect_prunable_layers()
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
class Conv4(BaseModel):
def __init__(self):
super(Conv4, self).__init__()
self.features = nn.Sequential(DenseConv2d(3, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2))
self.classifier = DenseLinear(in_features=32 * 6 * 6, out_features=2)
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def conv2() -> Conv2:
return Conv2()
def conv4() -> Conv4:
return Conv4()
# TODO: define pretrain etc.
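# A minimal usage sketch (not part of the original module). It assumes the
# Dense* layers behave like their torch.nn counterparts and that, following
# the LEAF benchmark, Conv2 takes 1x28x28 inputs (62 classes) while Conv4
# takes 3x84x84 inputs (2 classes); both sizes are consistent with the
# classifier input features defined above.
if __name__ == "__main__":
    import torch

    x_femnist = torch.randn(4, 1, 28, 28)
    x_celeba = torch.randn(4, 3, 84, 84)
    print(conv2()(x_femnist).shape)  # expected: torch.Size([4, 62])
    print(conv4()(x_celeba).shape)   # expected: torch.Size([4, 2])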
| [((14, 38, 14, 59), 'torch.nn.ReLU', 'nn.ReLU', (), '', True, 'from torch import nn as nn\n'), ((15, 38, 15, 63), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (), '', True, 'from torch import nn as nn\n'), ((17, 38, 17, 59), 'torch.nn.ReLU', 'nn.ReLU', (), '', True, 'from torch import nn as nn\n'), ((18, 38, 18, 63), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (), '', True, 'from torch import nn as nn\n'), ((21, 40, 21, 61), 'torch.nn.ReLU', 'nn.ReLU', (), '', True, 'from torch import nn as nn\n'), ((36, 38, 36, 56), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(36, 53, 36, 55): '32'}, {}), '(32)', True, 'from torch import nn as nn\n'), ((37, 38, 37, 53), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ({(37, 51, 37, 52): '2'}, {}), '(2)', True, 'from torch import nn as nn\n'), ((39, 38, 39, 56), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(39, 53, 39, 55): '32'}, {}), '(32)', True, 'from torch import nn as nn\n'), ((40, 38, 40, 53), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ({(40, 51, 40, 52): '2'}, {}), '(2)', True, 'from torch import nn as nn\n'), ((42, 38, 42, 56), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(42, 53, 42, 55): '32'}, {}), '(32)', True, 'from torch import nn as nn\n'), ((43, 38, 43, 53), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ({(43, 51, 43, 52): '2'}, {}), '(2)', True, 'from torch import nn as nn\n'), ((45, 38, 45, 56), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(45, 53, 45, 55): '32'}, {}), '(32)', True, 'from torch import nn as nn\n'), ((46, 38, 46, 53), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ({(46, 51, 46, 52): '2'}, {}), '(2)', True, 'from torch import nn as nn\n')] |
quimaguirre/NetworkAnalysis | scripts/generate_network_interactomix.py | c7a4da3ba5696800738b4767065ce29fa0020d79 | import argparse
import ConfigParser
import sys, os, re
import biana
try: from biana import *
except: sys.exit(10)
import methods_dictionaries as methods_dicts
def main():
options = parse_user_arguments()
generate_network(options)
def parse_user_arguments(*args, **kwds):
parser = argparse.ArgumentParser(
description = "Generate a protein-protein interaction network (implemented for Interactomix platform)",
epilog = "@oliva's lab 2019")
parser.add_argument('-iseed','--seeds_input_file',dest='seed',action = 'store',
help = 'Seeds Input file (default is input_seed)')
parser.add_argument('-radius','--radius_of_subnetwork_around_seeds',dest='radius',default=0,action = 'store',type=int,
help = '''Network is built in a radius of connections around the seed proteins.
If 0, it creates the complete interactome''')
parser.add_argument('-taxid','--TaxID',dest='taxid',action = 'store',default='9606',
help = 'Tax ID (i.e. human=9606 is default if TaxID=0 there is no restriction)')
parser.add_argument('-stype','--seed_type',dest='stype',action = 'store',default='geneid',
help = 'Type of identifier for seeds (default is geneid)')
parser.add_argument('-ttype','--translation_type',dest='ttype',action = 'store',default='accessionnumber',
help = '''Type of identifier for the output translation of codes (default is accessionnumber)
Using "proteinsequence" provides with the longest sequence of all codes''')
parser.add_argument('-trans','--translation_of_nodes_file',dest='translation_file',action = 'store',default='translation_nodes.txt',
help = 'File with the translation of codes from BIANA to the selected type for all nodes')
parser.add_argument('-strans','--translation_of_seeds_file',dest='translation_seeds_file',action = 'store',default='translation_seeds_to_BIANA_codes.txt',
help = 'File with the translation of codes from the introduced type of code to BIANA codes')
parser.add_argument('-edge','--edge_file',dest='edge',action = 'store', default='biana_edges',
help = 'Output file with edges(default is biana_edges)')
parser.add_argument('-node','--node_file',dest='node',action = 'store', default='biana_nodes',
help = 'Output file with nodes(default is biana_nodes)')
parser.add_argument('-format','--output_format',dest='format',action = 'store',default='sif',
help = '''Format file of the edge file:\tsif (default), netscore, raw, multi-fields:\n
'sif': <node1>\tscore\t<node2>\n
'netscore': <node1>\t<node2>\t<score>\n
'raw': <node1>\t<node2>\n
'multi-fields' : <node1>\t<node2>\t<sources>\t<method_ids>\t<method_names>\t<pmids>\n''')
parser.add_argument('-rAFF','--restricted_to_TAP',dest='restricted_to_TAP',action = 'store_true',
help = 'Flag to use interactions at least described by affinity methods (i.e. Tandem Affinity Purification)')
parser.add_argument('-rY2H','--restricted_to_Y2H',dest='restricted_to_Y2H',action = 'store_true',
help = 'Flag to use interactions at least described by yeast two hybrid methods (Y2H)')
parser.add_argument('-rUSER','--restricted_to_user',dest='restricted_to_user',action = 'store',default='restricted_methods',
help = 'File to use interactions described by the user selected methods')
parser.add_argument('-eAFF','--except_TAP',dest='except_TAP',action = 'store_true',
help = 'Flag to use all interactions except those described by affinity methods (i.e. Tandem Affinity Purification)')
parser.add_argument('-eY2H','--except_Y2H',dest='except_Y2H',action = 'store_true',
help = 'Flag to use all interactions except those described by yeast two hybrid methods (Y2H)')
parser.add_argument('-eUSER','--except_user',dest='except_user',action = 'store',default='restricted_methods',
help = 'File to reject interactions described by the user selected methods')
parser.add_argument('-v','--verbose',dest='verbose',action = 'store_true',
help = 'Flag to use verbose mode')
options=parser.parse_args()
"""
Example:
python generate_network_interactomix.py -iseed example/sample1.txt -radius 1 -taxid 9606 -stype uniprotentry -ttype proteinsequence -trans example/output/example.proteinsequence.trans -strans example/output/example.seeds.trans -edge example/output/example.edges -node example/output/example.nodes -format raw -rY2H
python /home/quim/PHD/Projects/BIANA/scripts/generate_network_interactomix.py -radius 0 -taxid 9606 -edge /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020.txt -node /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020_nodes.txt -trans /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020_translation.txt -ttype geneid -format multi-fields &> /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020.log
"""
return options
def generate_network(options):
"""
Generates a protein-protein interaction network extracting information from BIANA.
"""
#----------------------#
# FIXED PARAMETERS #
#----------------------#
# Parameters that I have decided to fix
restricted_to_seeds = False
minimum_number_of_methods = 1
minimum_number_of_db = 1
seed_score = 0.1
#--------------------------------------#
# GET INFORMATION FROM CONFIG FILE #
#--------------------------------------#
# Get the program path
main_path = os.path.abspath(os.path.dirname(__file__))
# Read the config file
config_file = os.path.join(main_path, 'config.ini')
config = ConfigParser.ConfigParser()
config.read(config_file)
#--------------------------------------#
# LOAD THE DICTIONARIES OF METHODS #
#--------------------------------------#
# Get the affinity dictionary
affinity_dict = methods_dicts.affinity_dict
affinity=set(affinity_dict.keys())
# Get the complementation dictionary
complementation_dict = methods_dicts.complementation_dict
complementation=set(complementation_dict.keys())
#---------------------------------------#
# GET METHODS THAT WILL BE FILTERED #
#---------------------------------------#
# Check if the user has introduced a file with methods that must be included
if not fileExist(options.restricted_to_user):
print "No restriction on methods selected by the user"
user_selection=False
else:
use_methods=[]
with open(options.restricted_to_user) as input_method_fd:
for line in input_method_fd:
fields = line.strip().split("\t")
use_methods.append(fields[0])
user_selection=True
print "Input to use only Methods:",repr(use_methods)
# Check if the user has introduced a file with methods that have to be excluded
if not fileExist(options.except_user):
print "No rejection of methods selected by the user"
user_rejection=False
else:
no_methods=[]
with open(options.except_user) as input_method_fd:
for line in input_method_fd:
fields = line.strip().split("\t")
no_methods.append(fields[0])
user_rejection=True
print "Input of rejected Methods:",repr(no_methods)
#---------------------------#
# START A BIANA SESSION #
#---------------------------#
print "Open session"
session = create_new_session( sessionID="biana_session",
dbname=config.get('BIANA', 'database'),
dbhost=config.get('BIANA', 'host'),
dbuser=config.get('BIANA', 'user'),
dbpassword=config.get('BIANA', 'password'),
unification_protocol=config.get('BIANA', 'unification_protocol') )
print "Continue"
#------------------------------#
# DEFINE A USER ENTITY SET #
#------------------------------#
# Create network network of expansion if the radius is larger than 0
if restricted_to_seeds or options.radius>0:
# Check if the seeds file exists
if not fileExist(options.seed):
print "File with seeds is missing or not found"
sys.exit(10)
else:
level=options.radius
seed_list = get_seeds_from_file(options.seed)
# If we have Taxonomy restriction, we add it
if options.taxid != "0":
print("Check Proteome %s"%(repr(options.taxid)))
proteome = session.create_new_user_entity_set( identifier_description_list =seed_list,
attribute_restriction_list=[("taxid",options.taxid)],
id_type=options.stype,new_user_entity_set_id="proteome",
negative_attribute_restriction_list=[] )
else:
print('Proteome without Taxonomy restriction')
proteome = session.create_new_user_entity_set( identifier_description_list =seed_list,
id_type=options.stype,new_user_entity_set_id="proteome",
negative_attribute_restriction_list=[] )
else:
level=0
proteome = session.create_new_user_entity_set( identifier_description_list = [("taxid",options.taxid)],
attribute_restriction_list=[], id_type="embedded",
new_user_entity_set_id="proteome",
negative_attribute_restriction_list=[] )
#----------------------------------------------------#
# SELECT THE INTERACTIONS OF THE USER ENTITY SET #
#----------------------------------------------------#
print ("Selecting interactions")
# Select interactions that have been detected at least by affinity technology
if options.restricted_to_TAP:
print ('Using interactions at least described by affinity methods (i.e. Tandem Affinity Purification)')
session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
relation_attribute_restriction_list = [("Method_id",400)],
#relation_attribute_restriction_list = [("psimi_name","affinity technology")],
include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
# Select interactions that have been detected at least by yeast two hybrid
elif options.restricted_to_Y2H:
print ('Using interactions at least described by yeast-two-hybrid methods (Y2H)')
session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
relation_attribute_restriction_list = [("Method_id",18)],
#relation_attribute_restriction_list = [("psimi_name","y2h2")],
include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
# Select all interactions
else:
session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
# Summary of interactions
out_network = open(options.edge,'w')
all_interactions = proteome.getRelations()
print "Num interactions:", len(all_interactions)
#--------------------------------------#
# FILTER THE SELECTED INTERACTIONS #
#--------------------------------------#
nodes=set()
# Get all the user entity ids from the user entity set 'proteome'
all_uEs = proteome.get_user_entity_ids()
# Obtain a dictionary user entity ID => type
uEId_to_type = session.dbAccess.get_user_entity_type(config.get('BIANA', 'unification_protocol'), all_uEs)
skip_interactions=0
for (uE_id1, uE_id2) in all_interactions:
#self.dbAccess.get_external_entities_dict( externalEntityIdsList = [external_entity_relation_id] )
# Get TYPE of user entity
uE1_type = uEId_to_type[uE_id1]
uE2_type = uEId_to_type[uE_id2]
# If type is not protein, we skip the interaction
if uE1_type != 'protein' or uE2_type != 'protein':
if options.verbose:
print('Skipping interaction because the type of one of the user entities is not protein!')
print('Node 1: {}\tType: {}'.format(uE_id1, uE1_type))
print('Node 2: {}\tType: {}'.format(uE_id2, uE2_type))
skip_interactions=skip_interactions+1
continue
eErIDs_list = proteome.get_external_entity_relation_ids(uE_id1, uE_id2)
method_names = set()
method_ids = set()
source_databases = set()
use_method_ids=set()
pubmed_ids = set()
unused_method_names = set()
relationObj_dict = session.dbAccess.get_external_entities_dict(
externalEntityIdsList = eErIDs_list, attribute_list = [],
relation_attribute_list = ["method_id","psimi_name","pubmed"], participant_attribute_list = [] )
num_methods=0
for current_eErID in eErIDs_list:
relationObj = relationObj_dict[current_eErID]
if options.verbose:
print "Interaction: (",uE_id1,",",uE_id2,")"
print relationObj
#if relationObj.get_attribute(attribute_identifier="psimi_name") is not None:
# print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="psimi_name") ])
#if relationObj.get_attribute(attribute_identifier="method_id") is not None:
#print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="method_id") ])
#print relationObj.get_attributes_dict()
#print [ x.value for x in relationObj.get_attributes_dict()["psimi_name"] ]
#print [ x.value for x in relationObj.get_attributes_dict()["method_id"] ]
if "psimi_name" in relationObj.get_attributes_dict():
method_names.update([ str(x.value) for x in relationObj.get_attributes_dict()["psimi_name"] ])
if "method_id" in relationObj.get_attributes_dict():
method_ids.update([ x.value for x in relationObj.get_attributes_dict()["method_id"]])
if "pubmed" in relationObj.get_attributes_dict():
pubmed_ids.update([ x.value for x in relationObj.get_attributes_dict()["pubmed"]])
source_databases.add(str(session.dbAccess.get_external_database(
database_id = relationObj.get_source_database()) ))
if options.except_TAP:
for m in method_ids:
if m not in affinity:
use_method_ids.add(m)
#print "Add", m
else:
unused_method_names.add(affinity_dict[m])
elif options.except_Y2H:
#print "check Y2H"
for m in method_ids:
if m not in complementation:
use_method_ids.add(m)
#print "Add", m
else:
unused_method_names.add(complementation_dict[m])
elif user_rejection:
for m in method_ids:
if m not in no_methods:
use_method_ids.add(m)
elif user_selection:
for m in method_ids:
#print "Check",repr(use_methods)
if m in set(use_methods):
use_method_ids.add(m)
                elif options.verbose:
print "Not among selected methods ",m
else:
use_method_ids.update(method_ids)
if len(source_databases) > 0:
info_sources=";".join([str(x) for x in source_databases])
else:
if options.verbose:
print('Skipping interaction it has no source database!')
print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2))
skip_interactions=skip_interactions+1
continue
if len(method_names) > 0:
method_names = [x for x in method_names if x not in unused_method_names] # Remove method names that were excluded
info_methods=";".join([str(x) for x in method_names])
else:
info_methods='-'
if len(use_method_ids) > 0:
info_methods_ids=";".join([str(x) for x in use_method_ids])
else:
if options.verbose:
print('Skipping interaction it has no method!')
print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2))
skip_interactions=skip_interactions+1
continue
if len(pubmed_ids) > 0:
info_pubmed_ids=";".join([str(x) for x in pubmed_ids])
else:
info_pubmed_ids='-'
num_databases=len(source_databases)
num_methods=len(use_method_ids)
num_pubmeds = len(pubmed_ids)
if options.verbose:
print "Methods",num_methods,info_methods,"\tSelected:",info_methods_ids
print "Databases",num_databases,info_sources
print "Pubmeds",num_pubmeds,info_pubmed_ids
# Check if the number of methods is higher than the minimum established
if num_methods >= minimum_number_of_methods:
use=True
else:
use=False
# Check if the number of database is higher than the minimum established
if use and num_databases >= minimum_number_of_db:
use=True
else:
use=False
if not use:
skip_interactions=skip_interactions+1
#print method_names, method_ids, source_databases
#----------------------#
# OUTPUT EDGE FILE #
#----------------------#
if use:
            #print uE_id1, uE_id2
nodes.add(uE_id1)
nodes.add(uE_id2)
#print "Attribute ",(uE_id1,uE_id2).get_attribute(
if options.format == 'multi-fields' :
out_network.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".
format(uE_id1,uE_id2,info_sources,info_methods_ids,info_methods,info_pubmed_ids))
elif options.format == 'netscore':
out_network.write('\t{}\t{}\t{:.2f}\n'.format(uE_id1,uE_id2,1.))
elif options.format == 'raw':
out_network.write("{}\t{}\n".format(uE_id1,uE_id2))
else:
# If the format is not multi-fields, netscore or raw, the output format is sif
out_network.write("{}\t{:.2f}\t{}\n".format(uE_id1,1.,uE_id2))
print "Num neglected interactions:", skip_interactions
out_network.close()
#---------------------------------------#
# OUTPUT NODE AND TRANSLATION FILES #
#---------------------------------------#
# If we wanted the complete interactome, the translation will be done differently
if options.radius <= 0:
# Output node file
out_proteins = open(options.node,'w')
for protein in nodes:
if options.format == 'multi-fields':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,0.1))
elif options.format == 'netscore':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,0.1))
else:
out_proteins.write("{0}\t{1:.2f}\n".format(protein,0.1))
out_proteins.close()
################################# TRANSLATION ####################################
out_translation = open(options.translation_file,'w')
# TRANSLATION TO 'stype'
trans_stype=False
if options.stype != 'proteinsequence' and options.stype != options.ttype:
trans_stype = True
out_trans_stype = open(options.translation_file+'.'+options.stype+'.trans','w')
for protein in nodes:
uE = session.get_user_entity(protein)
translate=set()
translate_stype=set()
if options.ttype == "proteinsequence":
maxlen=0;
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
if maxlen < len(current_id.value.get_sequence().upper()):
maxlen=len(current_id.value.get_sequence().upper())
translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=options.ttype) if len(str(current_id.value.get_sequence().upper())) == maxlen ] )
#print "Translation",protein,translation
#print("{0}\t'{1}'\n".format(protein,translation))
else:
##### TRANSLATION TO 'ttype'
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
translate.add(current_id.value.upper())
translation="','".join(["{0}".format(x) for x in translate])
out_translation.write("{0}\t'{1}'\n".format(protein,translation))
##### TRANSLATION TO STYPE
if trans_stype:
for current_id in uE.get_attribute(attribute_identifier=options.stype):
translate_stype.add(current_id.value.upper())
translation_stype="','".join(["{0}".format(x) for x in translate_stype])
out_trans_stype.write("{0}\t'{1}'\n".format(protein,translation_stype))
out_translation.close()
if trans_stype:
out_trans_stype.close()
####################################################################################
# If we wanted a network of expansion, the translation will be done differently
elif options.radius > 0:
# Read the seeds
seeds=set()
input_seed = open(options.seed,'r')
for line in input_seed:
fields = line.strip().split("\t")
seeds.add(fields[0].lower())
input_seed.close()
# Output node file
out_proteins = open(options.node,'w')
translate={}
for protein in nodes:
score=seed_score
uE = session.get_user_entity(protein)
for current_id in uE.get_attribute(attribute_identifier=options.stype):
if current_id.value.lower() in seeds:
translate.setdefault(current_id.value.lower(),[])
translate[current_id.value.lower()].append(protein)
score=1.0
if options.format == 'multi-fields':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,score))
elif options.format == 'netscore':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,score))
else:
out_proteins.write("{0}\t{1:.2f}\n".format(protein,score))
out_proteins.close()
# Get the IDS of single nodes that were not previously found in the network
single=set()
for uE_id in proteome.get_unconnected_nodes():
single.add(uE_id)
for protein in single:
uE = session.get_user_entity(protein)
for current_id in uE.get_attribute(attribute_identifier=options.stype):
if current_id.value.lower() in seeds:
translate.setdefault(current_id.value.lower(),[])
translate[current_id.value.lower()].append(protein)
# Get all IDS of SEEDS, defined as "proteome", and check missing codes to be
# added for translation
allseed=set()
for uE_id in proteome.get_user_entity_ids():
allseed.add(uE_id)
for protein in allseed:
if protein not in single and protein not in nodes:
uE = session.get_user_entity(protein)
for current_id in uE.get_attribute(attribute_identifier=options.stype):
if current_id.value.lower() in seeds:
translate.setdefault(current_id.value.lower(),[])
translate[current_id.value.lower()].append(protein)
################################# TRANSLATION ####################################
out_translation = open(options.translation_seeds_file,'w')
for s in seeds:
if s == '': continue
if s in translate:
codes=set(translate[s])
translation="','".join([str(x) for x in codes])
#out_translation.write("%s\t'%s'\n" % (s.upper(),translation))
out_translation.write("{0}\t'{1}'\n".format(s.upper(),translation))
else:
out_translation.write("{0}\t'Unknown'\n".format(s.upper()))
out_translation.close()
# Output translation file
# TRANSLATION TO 'ttype'
out_translation = open(options.translation_file,'w')
# TRANSLATION TO 'stype'
trans_stype=False
if options.stype != 'proteinsequence' and options.stype != options.ttype:
trans_stype = True
out_trans_stype = open(options.translation_file+'.'+options.stype+'.trans','w')
for protein in nodes:
uE = session.get_user_entity(protein)
translate=set()
translate_stype=set()
if options.ttype == "proteinsequence":
maxlen=0;
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
if maxlen < len(current_id.value.get_sequence().upper()):
maxlen=len(current_id.value.get_sequence().upper())
translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=options.ttype) if len(str(current_id.value.get_sequence().upper())) == maxlen ] )
#print "Translation",protein,translation
#print("{0}\t'{1}'\n".format(protein,translation))
else:
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
translate.add(current_id.value.upper())
translation="','".join(["{0}".format(x) for x in translate])
out_translation.write("{0}\t'{1}'\n".format(protein,translation))
##### TRANSLATION TO STYPE
if trans_stype:
for current_id in uE.get_attribute(attribute_identifier=options.stype):
translate_stype.add(current_id.value.upper())
translation_stype="','".join(["{0}".format(x) for x in translate_stype])
out_trans_stype.write("{0}\t'{1}'\n".format(protein,translation_stype))
out_translation.close()
if trans_stype:
out_trans_stype.close()
####################################################################################
print('Generation of the network done!')
return
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def get_seeds_from_file(seed_file):
"""
Obtain the seeds from a file and introduce them to a Python list.
The seeds must be separated by new lines!
"""
seed_set = set()
with open(seed_file, 'r') as seed_file_fd:
for line in seed_file_fd:
fields = line.strip().split('\t')
seed_set.add(fields[0])
return list(seed_set)
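# Example seed file contents (one seed per line; only the first tab-separated
# column is used), assuming `-stype geneid` identifiers:
#
#   673
#   5290
#   7157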
if __name__ == "__main__":
main()
| [] |
kids-first/kf-api-study-creator | tests/data/s3_scrape_config.py | 93a79b108b6474f9b4135ace06c89ddcf63dd257 | """
This is an extract config intended for S3 object manifests produced by TBD.
To use it, you must import it in another extract config and override at least
the `source_data_url`. You may also append additional operations to the
`operations` list as well.
For example you could have the following in your extract config module:
from kf_ingest_packages.common.extract_configs.s3_object_info import *
source_data_url = 'file://../data/kf-seq-data-bcm-chung-s3-objects.tsv'
operations.append(
value_map(
in_col='Key',
out_col=CONCEPT.BIOSPECIMEN.ID,
m=lambda x: x
)
)
"""
import os
from kf_lib_data_ingest.common import constants
from kf_lib_data_ingest.common.constants import GENOMIC_FILE
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.etl.extract.operations import (
keep_map,
row_map,
value_map,
constant_map,
)
def file_ext(x):
"""
    Get the genomic file extension: the longest key in FILE_EXT_FORMAT_MAP
    that `x` ends with, or None if there is no match
"""
matches = [
file_ext for file_ext in FILE_EXT_FORMAT_MAP if x.endswith(file_ext)
]
if matches:
file_ext = max(matches, key=len)
else:
file_ext = None
return file_ext
FILE_EXT_FORMAT_MAP = {
"fq": GENOMIC_FILE.FORMAT.FASTQ,
"fastq": GENOMIC_FILE.FORMAT.FASTQ,
"fq.gz": GENOMIC_FILE.FORMAT.FASTQ,
"fastq.gz": GENOMIC_FILE.FORMAT.FASTQ,
"bam": GENOMIC_FILE.FORMAT.BAM,
"hgv.bam": GENOMIC_FILE.FORMAT.BAM,
"cram": GENOMIC_FILE.FORMAT.CRAM,
"bam.bai": GENOMIC_FILE.FORMAT.BAI,
"bai": GENOMIC_FILE.FORMAT.BAI,
"cram.crai": GENOMIC_FILE.FORMAT.CRAI,
"crai": GENOMIC_FILE.FORMAT.CRAI,
"g.vcf.gz": GENOMIC_FILE.FORMAT.GVCF,
"g.vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
"vcf.gz": GENOMIC_FILE.FORMAT.VCF,
"vcf": GENOMIC_FILE.FORMAT.VCF,
"vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
"peddy.html": "html",
}
DATA_TYPES = {
GENOMIC_FILE.FORMAT.FASTQ: GENOMIC_FILE.DATA_TYPE.UNALIGNED_READS,
GENOMIC_FILE.FORMAT.BAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
GENOMIC_FILE.FORMAT.CRAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
GENOMIC_FILE.FORMAT.BAI: "Aligned Reads Index",
GENOMIC_FILE.FORMAT.CRAI: "Aligned Reads Index",
GENOMIC_FILE.FORMAT.VCF: "Variant Calls",
GENOMIC_FILE.FORMAT.GVCF: "gVCF",
"g.vcf.gz.tbi": "gVCF Index",
"vcf.gz.tbi": "Variant Calls Index",
"html": "Other",
}
def filter_df_by_file_ext(df):
"""
Only keep rows where file extension is one of those in
FILE_EXT_FORMAT_MAP.keys
"""
df[CONCEPT.GENOMIC_FILE.FILE_FORMAT] = df["Key"].apply(
lambda x: file_format(x)
)
return df[df[CONCEPT.GENOMIC_FILE.FILE_FORMAT].notnull()]
source_data_url = (
'https://localhost:5002/download/study/SD_ME0WME0W/'
'file/SF_Y1JMXTTS/version/FV_4RYEMD71'
)
do_after_read = filter_df_by_file_ext
def s3_url(row):
"""
Create S3 URL for object from S3 bucket and key
"""
return f's3://{row["Bucket"]}/{row["Key"]}'
def file_format(x):
"""
Get genomic file format by looking genomic file ext up in
FILE_EXT_FORMAT_MAP dict
"""
# File format
return FILE_EXT_FORMAT_MAP.get(file_ext(x))
def data_type(x):
"""
Get genomic file data type by looking up file format in DATA_TYPES.
However, if the file's extension has `tbi` in it, then use the file
extension itself to do the data type lookup.
"""
ext = file_ext(x)
if "tbi" in ext:
data_type = DATA_TYPES.get(ext)
else:
data_type = DATA_TYPES.get(file_format(x))
return data_type
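# Illustrative walk-through of the helpers above (the S3 key is a made-up example,
# not taken from a real manifest):
#   file_ext("genomics/sample1.g.vcf.gz.tbi")    -> "g.vcf.gz.tbi" (longest matching key wins)
#   file_format("genomics/sample1.g.vcf.gz.tbi") -> GENOMIC_FILE.FORMAT.TBI
#   data_type("genomics/sample1.g.vcf.gz.tbi")   -> "gVCF Index" (via the "tbi" branch)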
operations = [
row_map(out_col=CONCEPT.GENOMIC_FILE.ID, m=lambda row: s3_url(row)),
row_map(
out_col=CONCEPT.GENOMIC_FILE.URL_LIST, m=lambda row: [s3_url(row)]
),
value_map(
in_col="Key",
out_col=CONCEPT.GENOMIC_FILE.FILE_NAME,
m=lambda x: os.path.split(x)[-1],
),
keep_map(in_col="Size", out_col=CONCEPT.GENOMIC_FILE.SIZE),
value_map(
in_col="ETag",
out_col=CONCEPT.GENOMIC_FILE.HASH_DICT,
m=lambda x: {constants.FILE.HASH.S3_ETAG.lower(): x.replace('"', "")},
),
constant_map(
out_col=CONCEPT.GENOMIC_FILE.AVAILABILITY,
m=constants.GENOMIC_FILE.AVAILABILITY.IMMEDIATE,
),
keep_map(
in_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
out_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
),
value_map(
in_col="Key",
out_col=CONCEPT.GENOMIC_FILE.DATA_TYPE,
m=lambda x: data_type(x),
),
]
| [((144, 4, 144, 62), 'kf_lib_data_ingest.etl.extract.operations.keep_map', 'keep_map', (), '', False, 'from kf_lib_data_ingest.etl.extract.operations import keep_map, row_map, value_map, constant_map\n'), ((150, 4, 153, 5), 'kf_lib_data_ingest.etl.extract.operations.constant_map', 'constant_map', (), '', False, 'from kf_lib_data_ingest.etl.extract.operations import keep_map, row_map, value_map, constant_map\n'), ((154, 4, 157, 5), 'kf_lib_data_ingest.etl.extract.operations.keep_map', 'keep_map', (), '', False, 'from kf_lib_data_ingest.etl.extract.operations import keep_map, row_map, value_map, constant_map\n'), ((142, 20, 142, 36), 'os.path.split', 'os.path.split', ({(142, 34, 142, 35): 'x'}, {}), '(x)', False, 'import os\n'), ((148, 21, 148, 56), 'kf_lib_data_ingest.common.constants.FILE.HASH.S3_ETAG.lower', 'constants.FILE.HASH.S3_ETAG.lower', ({}, {}), '()', False, 'from kf_lib_data_ingest.common import constants\n')] |
jjhenkel/dockerizeme | hard-gists/5c973ec1b5ab2e387646/snippet.py | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | import bpy
from bpy.app.handlers import persistent
bl_info = {
"name": "Playback Once",
"author": "Adhi Hargo",
"version": (1, 0, 0),
"blender": (2, 67, 3),
"location": "",
"description": "Playback once.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Animation"}
@persistent
def stopPlaybackAtEnd(scene):
if scene.frame_current >= scene.frame_end:
bpy.ops.screen.animation_cancel()
def register():
bpy.app.handlers.frame_change_pre.append(stopPlaybackAtEnd)
def unregister():
bpy.app.handlers.frame_change_pre.remove(stopPlaybackAtEnd)
if __name__ == "__main__":
register()
| [((22, 4, 22, 63), 'bpy.app.handlers.frame_change_pre.append', 'bpy.app.handlers.frame_change_pre.append', ({(22, 45, 22, 62): 'stopPlaybackAtEnd'}, {}), '(stopPlaybackAtEnd)', False, 'import bpy\n'), ((25, 4, 25, 63), 'bpy.app.handlers.frame_change_pre.remove', 'bpy.app.handlers.frame_change_pre.remove', ({(25, 45, 25, 62): 'stopPlaybackAtEnd'}, {}), '(stopPlaybackAtEnd)', False, 'import bpy\n'), ((19, 8, 19, 41), 'bpy.ops.screen.animation_cancel', 'bpy.ops.screen.animation_cancel', ({}, {}), '()', False, 'import bpy\n')] |
AlbertUnruh/Py3Challenges | Py3Challenges/saves/challenges/c6_min.py | 52f03f157860f6464f0c1710bf051a8099c29ea2 | """
To master this you should consider using the builtin ``min`` function.
"""
from ...challenge import Challenge
from random import randint
x = []
for _ in range(randint(2, 10)):
x.append(randint(1, 100))
intro = f"You have to print the lowest value of {', '.join(str(_) for _ in x[:-1])} and {x[-1]}. (values: x)"
def validate_function(stdin: str, stdout: str, stderr: str, exc: tuple) -> bool:
try:
z = int(stdout.removesuffix("\n"))
except ValueError:
return False
else:
return min(x) == z
challenge = Challenge(
intro=intro,
validate_function=validate_function,
help=__doc__,
values={"x": x},
capture_stdout=True,
)
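# A minimal passing solution for this challenge (illustrative sketch, not part of the
# package): the validator above only checks that stdout equals min(x), so printing the
# builtin min of the provided values is enough:
#   print(min(x))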
| [((9, 15, 9, 29), 'random.randint', 'randint', ({(9, 23, 9, 24): '(2)', (9, 26, 9, 28): '(10)'}, {}), '(2, 10)', False, 'from random import randint\n'), ((10, 13, 10, 28), 'random.randint', 'randint', ({(10, 21, 10, 22): '(1)', (10, 24, 10, 27): '(100)'}, {}), '(1, 100)', False, 'from random import randint\n')] |
mrnicegyu11/osparc-simcore | services/web/server/tests/unit/with_dbs/01/test_director_v2.py | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | # pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
from typing import AsyncIterator
import pytest
from aioresponses import aioresponses
from faker import Faker
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
from models_library.users import UserID
from simcore_service_webserver import director_v2_api
from simcore_service_webserver.director_v2_models import (
ClusterCreate,
ClusterPatch,
ClusterPing,
)
@pytest.fixture()
async def mocked_director_v2(
director_v2_service_mock: aioresponses,
) -> AsyncIterator[aioresponses]:
yield director_v2_service_mock
@pytest.fixture
def user_id(faker: Faker) -> UserID:
return UserID(faker.pyint(min_value=1))
@pytest.fixture
def project_id(faker: Faker) -> ProjectID:
return ProjectID(faker.uuid4())
@pytest.fixture
def cluster_id(faker: Faker) -> ClusterID:
return ClusterID(faker.pyint(min_value=0))
async def test_create_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
task_out = await director_v2_api.create_or_update_pipeline(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, dict)
assert task_out["state"] == RunningState.NOT_STARTED
async def test_get_computation_task(
mocked_director_v2,
client,
user_id: UserID,
project_id: ProjectID,
):
task_out = await director_v2_api.get_computation_task(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, ComputationTask)
assert task_out.state == RunningState.NOT_STARTED
async def test_delete_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
await director_v2_api.delete_pipeline(client.app, user_id, project_id)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_create=st.builds(ClusterCreate))
async def test_create_cluster(
mocked_director_v2, client, user_id: UserID, cluster_create
):
created_cluster = await director_v2_api.create_cluster(
client.app, user_id=user_id, new_cluster=cluster_create
)
assert created_cluster is not None
assert isinstance(created_cluster, dict)
assert "id" in created_cluster
async def test_list_clusters(mocked_director_v2, client, user_id: UserID):
list_of_clusters = await director_v2_api.list_clusters(client.app, user_id=user_id)
assert isinstance(list_of_clusters, list)
assert len(list_of_clusters) > 0
async def test_get_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster = await director_v2_api.get_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster, dict)
assert cluster["id"] == cluster_id
async def test_get_cluster_details(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster_details = await director_v2_api.get_cluster_details(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster_details, dict)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_patch=st.from_type(ClusterPatch))
async def test_update_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID, cluster_patch
):
print(f"--> updating cluster with {cluster_patch=}")
updated_cluster = await director_v2_api.update_cluster(
client.app, user_id=user_id, cluster_id=cluster_id, cluster_patch=cluster_patch
)
assert isinstance(updated_cluster, dict)
assert updated_cluster["id"] == cluster_id
async def test_delete_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.delete_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_ping=st.builds(ClusterPing))
async def test_ping_cluster(mocked_director_v2, client, cluster_ping: ClusterPing):
await director_v2_api.ping_cluster(client.app, cluster_ping=cluster_ping)
async def test_ping_specific_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.ping_specific_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
| [((26, 1, 26, 17), 'pytest.fixture', 'pytest.fixture', ({}, {}), '()', False, 'import pytest\n'), ((79, 1, 79, 70), 'hypothesis.settings', 'settings', (), '', False, 'from hypothesis import HealthCheck, given, settings\n'), ((117, 1, 117, 70), 'hypothesis.settings', 'settings', (), '', False, 'from hypothesis import HealthCheck, given, settings\n'), ((138, 1, 138, 70), 'hypothesis.settings', 'settings', (), '', False, 'from hypothesis import HealthCheck, given, settings\n'), ((51, 21, 53, 5), 'simcore_service_webserver.director_v2_api.create_or_update_pipeline', 'director_v2_api.create_or_update_pipeline', ({(52, 8, 52, 18): 'client.app', (52, 20, 52, 27): 'user_id', (52, 29, 52, 39): 'project_id'}, {}), '(client.app, user_id, project_id)', False, 'from simcore_service_webserver import director_v2_api\n'), ((65, 21, 67, 5), 'simcore_service_webserver.director_v2_api.get_computation_task', 'director_v2_api.get_computation_task', ({(66, 8, 66, 18): 'client.app', (66, 20, 66, 27): 'user_id', (66, 29, 66, 39): 'project_id'}, {}), '(client.app, user_id, project_id)', False, 'from simcore_service_webserver import director_v2_api\n'), ((76, 10, 76, 74), 'simcore_service_webserver.director_v2_api.delete_pipeline', 'director_v2_api.delete_pipeline', ({(76, 42, 76, 52): 'client.app', (76, 54, 76, 61): 'user_id', (76, 63, 76, 73): 'project_id'}, {}), '(client.app, user_id, project_id)', False, 'from simcore_service_webserver import director_v2_api\n'), ((84, 28, 86, 5), 'simcore_service_webserver.director_v2_api.create_cluster', 'director_v2_api.create_cluster', (), '', False, 'from simcore_service_webserver import director_v2_api\n'), ((80, 22, 80, 46), 'hypothesis.strategies.builds', 'st.builds', ({(80, 32, 80, 45): 'ClusterCreate'}, {}), '(ClusterCreate)', True, 'from hypothesis import strategies as st\n'), ((93, 29, 93, 87), 'simcore_service_webserver.director_v2_api.list_clusters', 'director_v2_api.list_clusters', (), '', False, 'from simcore_service_webserver import director_v2_api\n'), ((101, 20, 103, 5), 'simcore_service_webserver.director_v2_api.get_cluster', 'director_v2_api.get_cluster', (), '', False, 'from simcore_service_webserver import director_v2_api\n'), ((111, 28, 113, 5), 'simcore_service_webserver.director_v2_api.get_cluster_details', 'director_v2_api.get_cluster_details', (), '', False, 'from simcore_service_webserver import director_v2_api\n'), ((123, 28, 125, 5), 'simcore_service_webserver.director_v2_api.update_cluster', 'director_v2_api.update_cluster', (), '', False, 'from simcore_service_webserver import director_v2_api\n'), ((118, 21, 118, 47), 'hypothesis.strategies.from_type', 'st.from_type', ({(118, 34, 118, 46): 'ClusterPatch'}, {}), '(ClusterPatch)', True, 'from hypothesis import strategies as st\n'), ((133, 10, 135, 5), 'simcore_service_webserver.director_v2_api.delete_cluster', 'director_v2_api.delete_cluster', (), '', False, 'from simcore_service_webserver import director_v2_api\n'), ((141, 10, 141, 77), 'simcore_service_webserver.director_v2_api.ping_cluster', 'director_v2_api.ping_cluster', (), '', False, 'from simcore_service_webserver import director_v2_api\n'), ((139, 20, 139, 42), 'hypothesis.strategies.builds', 'st.builds', ({(139, 30, 139, 41): 'ClusterPing'}, {}), '(ClusterPing)', True, 'from hypothesis import strategies as st\n'), ((147, 10, 149, 5), 'simcore_service_webserver.director_v2_api.ping_specific_cluster', 'director_v2_api.ping_specific_cluster', (), '', False, 'from simcore_service_webserver import director_v2_api\n')] |
sriramreddyM/pLitter | tools/py/heatmap.py | e506777af0b8bbae411b474f5eacee91e8efea59 | '''
converts video to frames and saves images by different interval, or overlap, etc
'''
import folium
from folium import plugins
from folium.plugins import HeatMap
import csv
# class plitterMap():
#     def __init__(self, file_path):
# self.data = file_path
# df = []
# with open(self.data) as f:
# reader = csv.reader(f)
# for row in reader:
# df_row = []
# df_row.append(row[0])
# df_row.append(row[0])
# df_row.append(row[0])
# df.append(row)
# self.tooltip = df[0][0]
#     def loadMap(self):
# self.map = folium.Map(location=[float(row[1]), float(row[2])], zoom_start = 18)
#     def loadGpsLoc(self):
# folium.Marker([float(row[1]), float(row[2])], popup="<i>"+row[0]+"</i>", tooltip=tooltip, icon=icon_circle).add_to(rangsit_map)
# rangsit_map | [] |
Geoalert/emergency-mapping | generator.py | 96668e4e5aa2b520e5727536f7a8f4c262ee3da6 | import numpy as np
def random_augmentation(img, mask):
#you can add any augmentations you need
return img, mask
def batch_generator(image, mask,
batch_size=1,
crop_size=0,
patch_size=256,
bbox= None,
augmentation=False):
'''
    image: nparray, must have 3 dimensions
mask: nparray, 2 dimensions, same size as image
batch_size: int, number of images in a batch
patch_size: int, size of the image returned, patch is square
    crop_size: int, how many pixels should be cropped off the mask
bbox: None or tuple of 4 ints, (min_y, max_y, min_x, max_x), the data is selected from within the bbox
augmentation: turn on/off data augmentation. The augmentation function is random_augmentation() above
returns batch of image and mask patches, image is turned to 'channels last' as required by unet
'''
if np.ndim(mask) != 2 or np.ndim(image) != 3:
raise ValueError('image must have 3 dims and mask 2 dims')
if mask.shape != image.shape[1:]:
raise ValueError('image and mask shape is different')
im_max = float(np.max(image))
mask_max = 1.0
#select subimage
if bbox is not None:
# check bbox
        if bbox[0] < 0 or bbox[2] < 0 \
           or bbox[1] > mask.shape[0] or bbox[3] > mask.shape[1] \
or bbox[0] + patch_size > bbox[1] or bbox[2] + patch_size > bbox[3] \
or patch_size <= 0:
raise ValueError("Incorrect bbox or patch size")
img_ = image[:, bbox[0] : bbox[1], bbox[2]:bbox[3]]
mask_ = mask[bbox[0] : bbox[1], bbox[2]:bbox[3]]
else:
img_ = image
mask_ = mask
while 1:
x = []
y = []
for i in range (batch_size):
random_x = np.random.randint(0, mask_.shape[1] - patch_size)
random_y = np.random.randint(0, mask_.shape[0] - patch_size)
img_patch = img_[:,
random_y : random_y + patch_size,
random_x : random_x + patch_size] / im_max
# transform the image from channels-first (rasterio format) to channels-last (default tensorflow format)
img_patch = np.moveaxis(img_patch, 0, 2)
mask_patch = mask_[random_y : random_y + patch_size,
random_x : random_x + patch_size] / mask_max
if augmentation:
img_patch, mask_patch = random_augmentation(img_patch, mask_patch)
# mask is cropped as it may be useful for some convnets that have output size less than input
if crop_size > 0:
mask_patch = mask_patch[crop_size : -crop_size,
crop_size : -crop_size]
mask_patch = np.expand_dims(mask_patch, 2)
x.append(img_patch)
y.append(mask_patch)
yield (np.array(x), np.array(y))
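# Minimal usage sketch (illustrative only; the array shapes and batch settings below are
# assumptions, not taken from the original project).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    image = rng.random((3, 512, 512))                    # channels-first, as expected above
    mask = (rng.random((512, 512)) > 0.5).astype(float)  # binary mask, same spatial size
    gen = batch_generator(image, mask, batch_size=2, patch_size=256)
    x_batch, y_batch = next(gen)
    print(x_batch.shape, y_batch.shape)                  # (2, 256, 256, 3) (2, 256, 256, 1)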
| [((29, 19, 29, 32), 'numpy.max', 'np.max', ({(29, 26, 29, 31): 'image'}, {}), '(image)', True, 'import numpy as np\n'), ((24, 7, 24, 20), 'numpy.ndim', 'np.ndim', ({(24, 15, 24, 19): 'mask'}, {}), '(mask)', True, 'import numpy as np\n'), ((24, 29, 24, 43), 'numpy.ndim', 'np.ndim', ({(24, 37, 24, 42): 'image'}, {}), '(image)', True, 'import numpy as np\n'), ((49, 23, 49, 72), 'numpy.random.randint', 'np.random.randint', ({(49, 41, 49, 42): '0', (49, 44, 49, 71): 'mask_.shape[1] - patch_size'}, {}), '(0, mask_.shape[1] - patch_size)', True, 'import numpy as np\n'), ((50, 23, 50, 72), 'numpy.random.randint', 'np.random.randint', ({(50, 41, 50, 42): '0', (50, 44, 50, 71): 'mask_.shape[0] - patch_size'}, {}), '(0, mask_.shape[0] - patch_size)', True, 'import numpy as np\n'), ((56, 24, 56, 52), 'numpy.moveaxis', 'np.moveaxis', ({(56, 36, 56, 45): 'img_patch', (56, 47, 56, 48): '0', (56, 50, 56, 51): '2'}, {}), '(img_patch, 0, 2)', True, 'import numpy as np\n'), ((69, 25, 69, 54), 'numpy.expand_dims', 'np.expand_dims', ({(69, 40, 69, 50): 'mask_patch', (69, 52, 69, 53): '2'}, {}), '(mask_patch, 2)', True, 'import numpy as np\n'), ((72, 15, 72, 26), 'numpy.array', 'np.array', ({(72, 24, 72, 25): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((72, 28, 72, 39), 'numpy.array', 'np.array', ({(72, 37, 72, 38): 'y'}, {}), '(y)', True, 'import numpy as np\n')] |
Avinesh/awx | awx/api/metadata.py | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
from collections import OrderedDict
# Django
from django.core.exceptions import PermissionDenied
from django.db.models.fields import PositiveIntegerField, BooleanField
from django.db.models.fields.related import ForeignKey
from django.http import Http404
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework import exceptions
from rest_framework import metadata
from rest_framework import serializers
from rest_framework.relations import RelatedField, ManyRelatedField
from rest_framework.fields import JSONField as DRFJSONField
from rest_framework.request import clone_request
# AWX
from awx.main.fields import JSONField, ImplicitRoleField
from awx.main.models import InventorySource, NotificationTemplate
from awx.main.scheduler.kubernetes import PodManager
class Metadata(metadata.SimpleMetadata):
def get_field_info(self, field):
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
text_attrs = [
'read_only', 'label', 'help_text',
'min_length', 'max_length',
'min_value', 'max_value',
'category', 'category_slug',
'defined_in_file'
]
for attr in text_attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
placeholder = getattr(field, 'placeholder', serializers.empty)
if placeholder is not serializers.empty:
field_info['placeholder'] = placeholder
serializer = getattr(field, 'parent', None)
if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
# Update help text for common fields.
field_help_text = {
'id': _('Database ID for this {}.'),
'name': _('Name of this {}.'),
'description': _('Optional description of this {}.'),
'type': _('Data type for this {}.'),
'url': _('URL for this {}.'),
'related': _('Data structure with URLs of related resources.'),
'summary_fields': _('Data structure with name/description for related resources.'),
'created': _('Timestamp when this {} was created.'),
'modified': _('Timestamp when this {} was last modified.'),
}
if field.field_name in field_help_text:
opts = serializer.Meta.model._meta.concrete_model._meta
verbose_name = smart_text(opts.verbose_name)
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)
if field.field_name == 'type':
field_info['filterable'] = True
else:
for model_field in serializer.Meta.model._meta.fields:
if field.field_name == model_field.name:
if getattr(model_field, '__accepts_json__', None):
field_info['type'] = 'json'
field_info['filterable'] = True
break
else:
field_info['filterable'] = False
# Indicate if a field has a default value.
# FIXME: Still isn't showing all default values?
try:
default = field.get_default()
if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
default = '{}://{}'.format(self.request.scheme, self.request.get_host())
field_info['default'] = default
except serializers.SkipField:
pass
if getattr(field, 'child', None):
field_info['child'] = self.get_field_info(field.child)
elif getattr(field, 'fields', None):
field_info['children'] = self.get_serializer_info(field)
if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]
# Indicate if a field is write-only.
if getattr(field, 'write_only', False):
field_info['write_only'] = True
# Special handling of inventory source_region choices that vary based on
# selected inventory source.
if field.field_name == 'source_regions':
for cp in ('azure_rm', 'ec2', 'gce'):
get_regions = getattr(InventorySource, 'get_%s_region_choices' % cp)
field_info['%s_region_choices' % cp] = get_regions()
# Special handling of group_by choices for EC2.
if field.field_name == 'group_by':
for cp in ('ec2',):
get_group_by_choices = getattr(InventorySource, 'get_%s_group_by_choices' % cp)
field_info['%s_group_by_choices' % cp] = get_group_by_choices()
# Special handling of notification configuration where the required properties
# are conditional on the type selected.
if field.field_name == 'notification_configuration':
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.init_parameters
# Special handling of notification messages where the required properties
# are conditional on the type selected.
try:
view_model = field.context['view'].model
except (AttributeError, KeyError):
view_model = None
if view_model == NotificationTemplate and field.field_name == 'messages':
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.default_messages
# Update type of fields returned...
model_field = None
if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
try:
model_field = serializer.Meta.model._meta.get_field(field.field_name)
except Exception:
pass
if field.field_name == 'type':
field_info['type'] = 'choice'
elif field.field_name in ('url', 'custom_virtualenv', 'token'):
field_info['type'] = 'string'
elif field.field_name in ('related', 'summary_fields'):
field_info['type'] = 'object'
elif isinstance(field, PositiveIntegerField):
field_info['type'] = 'integer'
elif field.field_name in ('created', 'modified'):
field_info['type'] = 'datetime'
elif (
RelatedField in field.__class__.__bases__ or
isinstance(model_field, ForeignKey)
):
field_info['type'] = 'id'
elif (
isinstance(field, JSONField) or
isinstance(model_field, JSONField) or
isinstance(field, DRFJSONField) or
isinstance(getattr(field, 'model_field', None), JSONField) or
field.field_name == 'credential_passwords'
):
field_info['type'] = 'json'
elif (
isinstance(field, ManyRelatedField) and
field.field_name == 'credentials'
# launch-time credentials
):
field_info['type'] = 'list_of_ids'
elif isinstance(model_field, BooleanField):
field_info['type'] = 'boolean'
return field_info
def get_serializer_info(self, serializer, method=None):
filterer = getattr(serializer, 'filter_field_metadata', lambda fields, method: fields)
return filterer(
super(Metadata, self).get_serializer_info(serializer),
method
)
def determine_actions(self, request, view):
# Add field information for GET requests (so field names/labels are
# available even when we can't POST/PUT).
actions = {}
for method in {'GET', 'PUT', 'POST'} & set(view.allowed_methods):
view.request = clone_request(request, method)
obj = None
try:
# Test global permissions
if hasattr(view, 'check_permissions'):
view.check_permissions(view.request)
# Test object permissions
if method == 'PUT' and hasattr(view, 'get_object'):
obj = view.get_object()
except (exceptions.APIException, PermissionDenied, Http404):
continue
else:
# If user has appropriate permissions for the view, include
# appropriate metadata about the fields that should be supplied.
serializer = view.get_serializer(instance=obj)
actions[method] = self.get_serializer_info(serializer, method=method)
finally:
view.request = request
for field, meta in list(actions[method].items()):
if not isinstance(meta, dict):
continue
if field == "pod_spec_override":
meta['default'] = PodManager().pod_definition
# Add type choices if available from the serializer.
if field == 'type' and hasattr(serializer, 'get_type_choices'):
meta['choices'] = serializer.get_type_choices()
# For GET method, remove meta attributes that aren't relevant
# when reading a field and remove write-only fields.
if method == 'GET':
attrs_to_remove = ('required', 'read_only', 'default', 'min_length', 'max_length', 'placeholder')
for attr in attrs_to_remove:
meta.pop(attr, None)
meta.get('child', {}).pop(attr, None)
if meta.pop('write_only', False):
actions['GET'].pop(field)
# For PUT/POST methods, remove read-only fields.
if method in ('PUT', 'POST'):
# This value should always be False for PUT/POST, so don't
# show it (file-based read-only settings can't be updated)
meta.pop('defined_in_file', False)
if meta.pop('read_only', False):
if field == 'id' and hasattr(view, 'attach'):
continue
actions[method].pop(field)
return actions
def determine_metadata(self, request, view):
# store request on self so we can use it to generate field defaults
# (such as TOWER_URL_BASE)
self.request = request
try:
setattr(view, '_request', request)
metadata = super(Metadata, self).determine_metadata(request, view)
finally:
delattr(view, '_request')
# Add type(s) handled by this view/serializer.
if hasattr(view, 'get_serializer'):
serializer = view.get_serializer()
if hasattr(serializer, 'get_types'):
metadata['types'] = serializer.get_types()
# Add search fields if available from the view.
if getattr(view, 'search_fields', None):
metadata['search_fields'] = view.search_fields
# Add related search fields if available from the view.
if getattr(view, 'related_search_fields', None):
metadata['related_search_fields'] = view.related_search_fields
# include role names in metadata
roles = []
model = getattr(view, 'model', None)
if model:
for field in model._meta.get_fields():
if type(field) is ImplicitRoleField:
roles.append(field.name)
if len(roles) > 0:
metadata['object_roles'] = roles
from rest_framework import generics
if isinstance(view, generics.ListAPIView) and hasattr(view, 'paginator'):
metadata['max_page_size'] = view.paginator.max_page_size
return metadata
class RoleMetadata(Metadata):
def determine_metadata(self, request, view):
metadata = super(RoleMetadata, self).determine_metadata(request, view)
if 'actions' in metadata:
metadata['actions'].pop('POST')
metadata['actions']['POST'] = {
"id": {"type": "integer", "label": "ID", "help_text": "Database ID for this role."},
"disassociate": {"type": "integer", "label": "Disassociate", "help_text": "Provide to remove this role."},
}
return metadata
class SublistAttachDetatchMetadata(Metadata):
def determine_actions(self, request, view):
actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view)
method = 'POST'
if method in actions:
for field in list(actions[method].keys()):
if field == 'id':
continue
actions[method].pop(field)
return actions
| [((31, 21, 31, 34), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((188, 27, 188, 57), 'rest_framework.request.clone_request', 'clone_request', ({(188, 41, 188, 48): 'request', (188, 50, 188, 56): 'method'}, {}), '(request, method)', False, 'from rest_framework.request import clone_request\n'), ((46, 35, 46, 71), 'django.utils.encoding.force_text', 'force_text', (), '', False, 'from django.utils.encoding import force_text, smart_text\n'), ((56, 22, 56, 51), 'django.utils.translation.ugettext_lazy', '_', ({(56, 24, 56, 50): '"""Database ID for this {}."""'}, {}), "('Database ID for this {}.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((57, 24, 57, 45), 'django.utils.translation.ugettext_lazy', '_', ({(57, 26, 57, 44): '"""Name of this {}."""'}, {}), "('Name of this {}.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((58, 31, 58, 68), 'django.utils.translation.ugettext_lazy', '_', ({(58, 33, 58, 67): '"""Optional description of this {}."""'}, {}), "('Optional description of this {}.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((59, 24, 59, 51), 'django.utils.translation.ugettext_lazy', '_', ({(59, 26, 59, 50): '"""Data type for this {}."""'}, {}), "('Data type for this {}.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((60, 23, 60, 44), 'django.utils.translation.ugettext_lazy', '_', ({(60, 25, 60, 43): '"""URL for this {}."""'}, {}), "('URL for this {}.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((61, 27, 61, 78), 'django.utils.translation.ugettext_lazy', '_', ({(61, 29, 61, 77): '"""Data structure with URLs of related resources."""'}, {}), "('Data structure with URLs of related resources.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((62, 34, 62, 98), 'django.utils.translation.ugettext_lazy', '_', ({(62, 36, 62, 97): '"""Data structure with name/description for related resources."""'}, {}), "('Data structure with name/description for related resources.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((63, 27, 63, 67), 'django.utils.translation.ugettext_lazy', '_', ({(63, 29, 63, 66): '"""Timestamp when this {} was created."""'}, {}), "('Timestamp when this {} was created.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((64, 28, 64, 74), 'django.utils.translation.ugettext_lazy', '_', ({(64, 30, 64, 73): '"""Timestamp when this {} was last modified."""'}, {}), "('Timestamp when this {} was last modified.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((68, 31, 68, 60), 'django.utils.encoding.smart_text', 'smart_text', ({(68, 42, 68, 59): 'opts.verbose_name'}, {}), '(opts.verbose_name)', False, 'from django.utils.encoding import force_text, smart_text\n'), ((212, 38, 212, 50), 'awx.main.scheduler.kubernetes.PodManager', 'PodManager', ({}, {}), '()', False, 'from awx.main.scheduler.kubernetes import PodManager\n')] |
BBVA/deeptracy | plugins/python/tasks.py | 40f4b6bba2bdd345e95e42d474c05fa90f15c3e9 | import json
from washer.worker.actions import AppendStdout, AppendStderr
from washer.worker.actions import CreateNamedLog, AppendToLog
from washer.worker.actions import SetProperty
from washer.worker.commands import washertask
def pipenv_graph2deps(rawgraph):
graph = json.loads(rawgraph)
def build_entry(data):
if 'required_version' in data:
spec = data['key'] + data['required_version']
else:
spec = data['key']
return {'installer': 'pipenv',
'spec': spec,
'source': 'pypi',
'name': data['package_name'],
'version': data['installed_version']}
def extract_dependencies(entries):
for entry in entries:
if 'package' in entry:
package = entry['package']
dependencies = entry.get('dependencies', [])
yield build_entry(package)
yield from extract_dependencies(dependencies)
else:
yield build_entry(entry)
yield from extract_dependencies(graph)
@washertask
def pip_install(repopath, path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install .")
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
@washertask
def requirement_file(repopath, requirement="requirements.txt",
path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install -r %s" % requirement)
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
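# Illustrative check of the pipenv-graph conversion above. The sample mimics the shape of
# `pipenv graph --json` output as consumed by pipenv_graph2deps; the package names and
# versions are made up.
if __name__ == "__main__":
    sample = json.dumps([
        {
            "package": {
                "key": "requests",
                "package_name": "requests",
                "installed_version": "2.22.0",
            },
            "dependencies": [
                {
                    "key": "idna",
                    "package_name": "idna",
                    "installed_version": "2.8",
                    "required_version": ">=2.5",
                }
            ],
        }
    ])
    for dep in pipenv_graph2deps(sample):
        print(dep)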
| [((10, 12, 10, 32), 'json.loads', 'json.loads', ({(10, 23, 10, 31): 'rawgraph'}, {}), '(rawgraph)', False, 'import json\n'), ((40, 8, 40, 24), 'invoke.Context', 'invoke.Context', ({}, {}), '()', False, 'import invoke\n'), ((58, 8, 58, 24), 'invoke.Context', 'invoke.Context', ({}, {}), '()', False, 'import invoke\n'), ((47, 10, 47, 34), 'washer.worker.actions.AppendStdout', 'AppendStdout', ({(47, 23, 47, 33): 'res.stdout'}, {}), '(res.stdout)', False, 'from washer.worker.actions import AppendStdout, AppendStderr\n'), ((48, 10, 48, 34), 'washer.worker.actions.AppendStderr', 'AppendStderr', ({(48, 23, 48, 33): 'res.stderr'}, {}), '(res.stderr)', False, 'from washer.worker.actions import AppendStdout, AppendStderr\n'), ((65, 10, 65, 34), 'washer.worker.actions.AppendStdout', 'AppendStdout', ({(65, 23, 65, 33): 'res.stdout'}, {}), '(res.stdout)', False, 'from washer.worker.actions import AppendStdout, AppendStderr\n'), ((66, 10, 66, 34), 'washer.worker.actions.AppendStderr', 'AppendStderr', ({(66, 23, 66, 33): 'res.stderr'}, {}), '(res.stderr)', False, 'from washer.worker.actions import AppendStdout, AppendStderr\n')] |
pkokkinos/senity | senity/utils/getSiteProfile.py | c6e41678620bef558cc3600929a8320ff2a285cf | import json
import os
# get site profile
def getSiteProfile(site_file):
with open(site_file) as json_file:
json_data = json.load(json_file)
return json_data
# get all site profile
def getAllSiteProfiles(site_folder):
allSiteProfiles = {}
allSiteFiles = os.listdir(site_folder)
for sf in allSiteFiles:
sp = getSiteProfile(site_folder + "/" + sf)
allSiteProfiles[sp["siteName"]] = []
for device in sp["devicesAvailable"]:
for i in range(device["deviceCounter"]):
allSiteProfiles[sp["siteName"]].append(device["deviceName"])
return allSiteProfiles
#sites_folder = "sites"
#print getAllSiteProfiles(sites_folder)
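# Site profile layout assumed by the readers above (key names come from the code,
# the example values are made up):
# {
#   "siteName": "site_01",
#   "devicesAvailable": [
#     {"deviceName": "fridge", "deviceCounter": 2},
#     {"deviceName": "tv", "deviceCounter": 1}
#   ]
# }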
| [((17, 19, 17, 42), 'os.listdir', 'os.listdir', ({(17, 30, 17, 41): 'site_folder'}, {}), '(site_folder)', False, 'import os\n'), ((8, 20, 8, 40), 'json.load', 'json.load', ({(8, 30, 8, 39): 'json_file'}, {}), '(json_file)', False, 'import json\n')] |
QingXinHu123/Lane_change_RL | ppo_new/baseline.py | 06c70e6f58d3478669b56800028e320ca03f5222 | import os, sys
from env.LaneChangeEnv import LaneChangeEnv
import random
import numpy as np
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
def episode_generator(pi, env, is_gui, ttc, gap, sumoseed, randomseed):
egoid = 'lane1.' + str(random.randint(1, 6))
ob = env.reset(egoid=egoid, tlane=0, tfc=2, is_gui=is_gui, sumoseed=sumoseed, randomseed=randomseed)
traci.vehicle.setColor(egoid, (255, 69, 0))
cur_ep_ret = 0 # return in current episode
cur_ep_ret_detail = 0
cur_ep_len = 0 # len of current episode
cur_ep_obs = []
cur_ep_acs = []
while True:
ac = pi(ob=ob, env=env, ttc=ttc, gap=gap)
ob, rew, new, info = env.step(ac)
cur_ep_ret += rew
cur_ep_ret_detail += np.array(list(info['reward_dict'].values()))
cur_ep_len += 1
cur_ep_obs.append(ob)
cur_ep_acs.append(ac)
if new:
return {"ep_obs": cur_ep_obs, "ep_acs": cur_ep_acs,
"ep_ret": cur_ep_ret, 'ep_rets_detail': cur_ep_ret_detail, "ep_len": cur_ep_len,
'ep_num_danger': info['num_danger'], 'ep_is_success': info['is_success'], 'ep_num_crash': info['num_crash'],
'ep_is_collision': info["is_collision"]}
def pi_baseline(ob, env, ttc, gap):
    # safety thresholds: time-to-collision (seconds) and distance gap to the target-lane neighbours
if env.ego.trgt_leader:
leader_speed = env.ego.trgt_leader.speed
else:
leader_speed = env.ego.speed
if env.ego.trgt_follower:
follower_speed = env.ego.trgt_follower.speed
else:
follower_speed = env.ego.speed
leader_dis = abs(ob[3 * 4 + 0 + 1])*239.8
follower_dis = abs(ob[4 * 4 + 0 + 1])*239.8
TTC = (leader_dis - 5) / max(env.ego.speed, 0.001)
TTC2 = (follower_dis - 5) / max(follower_speed, 0.001)
# print(TTC, TTC)
if TTC > ttc and TTC2 > ttc and leader_dis > gap and follower_dis > gap:
ac_lat = 1 # change lane
else:
ac_lat = 0 # abort
ac = ac_lat * 3 + 1
return ac
def evaluate_baseline(num_eps, ttc, gap, is_gui):
sumoseed = 0
randomseed = 0
pi = pi_baseline
env = LaneChangeEnv(is_train=False)
ret_eval = 0
ret_det_eval = 0 # not a integer, will be broadcasted
danger_num = 0
crash_num = 0
level_1_danger = []
level_2_danger = []
collision_num = 0
ep_len_list = []
success_num = 0
for i in range(num_eps):
ep_eval = episode_generator(pi, env, is_gui=is_gui, ttc=ttc, gap=gap, sumoseed=sumoseed, randomseed=randomseed)
ret_eval += ep_eval['ep_ret']
ret_det_eval += ep_eval['ep_rets_detail']
danger_num += ep_eval['ep_num_danger']
crash_num += ep_eval['ep_num_crash']
level_1_danger.append(1 if ep_eval['ep_num_danger'] > 0 else 0)
level_2_danger.append((1 if ep_eval['ep_num_crash'] > 0 else 0))
collision_num += ep_eval['ep_is_collision']
success_num += int(ep_eval['ep_is_success'])
if ep_eval['ep_is_success']:
ep_len_list.append(ep_eval['ep_len'])
sumoseed += 1
randomseed += 1
ret_eval /= float(num_eps)
ret_det_eval /= float(num_eps)
danger_rate = danger_num / num_eps
crash_rate = crash_num / num_eps
level_1_danger_rate = np.mean(level_1_danger)
level_2_danger_rate = np.mean(level_2_danger)
coll_rate = collision_num / num_eps
success_rate = success_num / float(num_eps)
success_len = np.mean(ep_len_list)
print('reward_detail: ', ret_det_eval)
print('reward: ', ret_eval,
'\ndanger_rate: ', danger_rate,
'\ncrash_rate: ', crash_rate,
'\nlevel-1-danger_rate: ', level_1_danger_rate,
'\nlevel-2-danger_rate: ', level_2_danger_rate,
'\ncollision_rate: ', coll_rate,
'\nsuccess_rate: ', success_rate,
'\nsucess_len: ', success_len)
env.close()
return ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, success_rate, success_len
NUM_EPS = 100
IS_GUI = False
# f = open('../data/baseline_evaluation/testseed2.csv', 'w+')
# safety_gap = 2
constraints_list = [3.0] # [1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0]
ttcs = [0.1, 0.3, 0.5, 1, 2, 3]
# ttcs = [2]
gap = 0
reward_list = []
danger_rate_list = []
crash_rate_list = []
level_1_danger_list = []
level_2_danger_list = []
coll_rate_list = []
succ_rate_list = []
succ_len_list = []
for ttc in ttcs:
ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, success_rate, success_len = evaluate_baseline(NUM_EPS, ttc, gap, IS_GUI)
reward_list.append(ret_eval)
danger_rate_list.append(danger_rate)
crash_rate_list.append(crash_rate)
level_1_danger_list.append(level_1_danger_rate)
level_2_danger_list.append(level_2_danger_rate)
coll_rate_list.append(coll_rate)
succ_rate_list.append(success_rate)
succ_len_list.append(success_len)
print('reward: ', reward_list)
print('danger rate: ', danger_rate_list)
print('crash rate: ', crash_rate_list)
print('level-1-danger_rate: ', level_1_danger_list)
print('level-2-danger_rate: ', level_2_danger_list)
print('collison rate: ', coll_rate_list)
print('success rate: ', succ_rate_list)
print('sucess len: ', succ_len_list)
# reward: [-89.12552753359037, -69.84537459892903, -73.81562785829651, -148.23580687485645, -227.71842861064192, -229.9101089174337]
# danger rate: [2.13, 0.88, 0.77, 1.88, 3.82, 3.82]
# crash rate: [0.58, 0.33, 0.5, 1.24, 2.09, 2.09]
# level-1-danger_rate: [0.23, 0.09, 0.05, 0.14, 0.25, 0.25]
# level-2-danger_rate: [0.05, 0.03, 0.05, 0.12, 0.2, 0.2]
# collison rate: [0.0, 0.0, 0.02, 0.09, 0.14, 0.14]
# success rate: [0.99, 0.99, 0.9, 0.6, 0.08, 0.05]
# sucess len: [55.656565656565654, 62.43434343434343, 67.5, 90.1, 66.625, 73.4]
| [((6, 12, 6, 58), 'os.path.join', 'os.path.join', ({(6, 25, 6, 48): "os.environ['SUMO_HOME']", (6, 50, 6, 57): '"""tools"""'}, {}), "(os.environ['SUMO_HOME'], 'tools')", False, 'import os, sys\n'), ((7, 4, 7, 26), 'sys.path.append', 'sys.path.append', ({(7, 20, 7, 25): 'tools'}, {}), '(tools)', False, 'import os, sys\n'), ((10, 4, 10, 63), 'sys.exit', 'sys.exit', ({(10, 13, 10, 62): '"""please declare environment variable \'SUMO_HOME\'"""'}, {}), '("please declare environment variable \'SUMO_HOME\'")', False, 'import os, sys\n'), ((17, 4, 17, 47), 'traci.vehicle.setColor', 'traci.vehicle.setColor', ({(17, 27, 17, 32): 'egoid', (17, 34, 17, 46): '(255, 69, 0)'}, {}), '(egoid, (255, 69, 0))', False, 'import traci\n'), ((68, 10, 68, 39), 'env.LaneChangeEnv.LaneChangeEnv', 'LaneChangeEnv', (), '', False, 'from env.LaneChangeEnv import LaneChangeEnv\n'), ((98, 26, 98, 49), 'numpy.mean', 'np.mean', ({(98, 34, 98, 48): 'level_1_danger'}, {}), '(level_1_danger)', True, 'import numpy as np\n'), ((99, 26, 99, 49), 'numpy.mean', 'np.mean', ({(99, 34, 99, 48): 'level_2_danger'}, {}), '(level_2_danger)', True, 'import numpy as np\n'), ((102, 18, 102, 38), 'numpy.mean', 'np.mean', ({(102, 26, 102, 37): 'ep_len_list'}, {}), '(ep_len_list)', True, 'import numpy as np\n'), ((15, 27, 15, 47), 'random.randint', 'random.randint', ({(15, 42, 15, 43): '(1)', (15, 45, 15, 46): '(6)'}, {}), '(1, 6)', False, 'import random\n')] |
toogy/pendigits-hmm | clean_data.py | 03382e1457941714439d40b67e53eaf117fe4d08 | import numpy as np
import pickle
from collections import defaultdict
from parsing import parser
from analysis import training
def main():
    parse = parser.Parser()
    train_digits = parse.parse_file('data/pendigits-train')
test_digits = parse.parse_file('data/pendigits-test')
centroids = training.get_digit_kmeans_centroids(
train_digits, 256 - 3)
training.set_digit_observations(
train_digits, centroids, 256)
training.set_digit_observations(
test_digits, centroids, 256)
train_sequences = defaultdict(list)
test_sequences = []
n_test_sequences = len(test_digits)
test_expected_labels = np.ndarray(shape=(n_test_sequences,))
for digit in train_digits:
train_sequences[digit.label].append(digit.np_array_observations)
for i, digit in enumerate(test_digits):
test_sequences.append(digit.np_array_observations)
test_expected_labels[i] = digit.label
with open('train_sequences', 'wb') as f:
pickle.dump(train_sequences, f)
with open('test_sequences', 'wb') as f:
pickle.dump(test_sequences, f)
with open('test_expected_labels', 'wb') as f:
pickle.dump(test_expected_labels, f)
if __name__ == '__main__':
main()
| [((9, 12, 9, 27), 'parsing.parser.Parser', 'parser.Parser', ({}, {}), '()', False, 'from parsing import parser\n'), ((14, 16, 15, 30), 'analysis.training.get_digit_kmeans_centroids', 'training.get_digit_kmeans_centroids', ({(15, 8, 15, 20): 'train_digits', (15, 22, 15, 29): '256 - 3'}, {}), '(train_digits, 256 - 3)', False, 'from analysis import training\n'), ((17, 4, 18, 37), 'analysis.training.set_digit_observations', 'training.set_digit_observations', ({(18, 8, 18, 20): 'train_digits', (18, 22, 18, 31): 'centroids', (18, 33, 18, 36): '(256)'}, {}), '(train_digits, centroids, 256)', False, 'from analysis import training\n'), ((19, 4, 20, 36), 'analysis.training.set_digit_observations', 'training.set_digit_observations', ({(20, 8, 20, 19): 'test_digits', (20, 21, 20, 30): 'centroids', (20, 32, 20, 35): '(256)'}, {}), '(test_digits, centroids, 256)', False, 'from analysis import training\n'), ((26, 22, 26, 39), 'collections.defaultdict', 'defaultdict', ({(26, 34, 26, 38): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((29, 27, 29, 64), 'numpy.ndarray', 'np.ndarray', (), '', True, 'import numpy as np\n'), ((41, 8, 41, 39), 'pickle.dump', 'pickle.dump', ({(41, 20, 41, 35): 'train_sequences', (41, 37, 41, 38): 'f'}, {}), '(train_sequences, f)', False, 'import pickle\n'), ((44, 8, 44, 38), 'pickle.dump', 'pickle.dump', ({(44, 20, 44, 34): 'test_sequences', (44, 36, 44, 37): 'f'}, {}), '(test_sequences, f)', False, 'import pickle\n'), ((47, 8, 47, 44), 'pickle.dump', 'pickle.dump', ({(47, 20, 47, 40): 'test_expected_labels', (47, 42, 47, 43): 'f'}, {}), '(test_expected_labels, f)', False, 'import pickle\n')] |
cypherdotXd/o3de | scripts/commit_validation/commit_validation/commit_validation.py | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | #
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import abc
import importlib
import os
import pkgutil
import re
import time
from typing import Dict, List, Tuple
VERBOSE = False
class Commit(abc.ABC):
"""An interface for accessing details about a commit"""
@abc.abstractmethod
def get_files(self) -> List[str]:
"""Returns a list of local files added/modified by the commit"""
pass
@abc.abstractmethod
def get_removed_files(self) -> List[str]:
"""Returns a list of local files removed by the commit"""
pass
@abc.abstractmethod
    def get_file_diff(self, file_name: str) -> str:
"""
Given a file name, returns a string in unified diff format
that represents the changes made to that file for this commit.
Most validators will only pay attention to added lines (with + in front)
"""
pass
@abc.abstractmethod
def get_description(self) -> str:
"""Returns the description of the commit"""
pass
@abc.abstractmethod
def get_author(self) -> str:
"""Returns the author of the commit"""
pass
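# Illustrative sketch only: a minimal in-memory implementation of the Commit interface
# above, handy as a test double. The class name and constructor arguments are assumptions,
# not part of the real validation pipeline.
class SimpleCommit(Commit):
    """A Commit backed by plain dictionaries (file name -> unified diff text)."""
    def __init__(self, files: Dict[str, str], removed_files: List[str],
                 description: str, author: str) -> None:
        self._files = files
        self._removed_files = removed_files
        self._description = description
        self._author = author
    def get_files(self) -> List[str]:
        return list(self._files.keys())
    def get_removed_files(self) -> List[str]:
        return list(self._removed_files)
    def get_file_diff(self, file_name: str) -> str:
        return self._files.get(file_name, '')
    def get_description(self) -> str:
        return self._description
    def get_author(self) -> str:
        return self._author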
def validate_commit(commit: Commit, out_errors: List[str] = None, ignore_validators: List[str] = None) -> bool:
"""Validates a commit against all validators
:param commit: The commit to validate
:param out_errors: if not None, will populate with the list of errors given by the validators
:param ignore_validators: Optional list of CommitValidator classes to ignore, by class name
:return: True if there are no validation errors, and False otherwise
"""
failed_count = 0
passed_count = 0
start_time = time.time()
# Find all the validators in the validators package (recursively)
validator_classes = []
validators_dir = os.path.join(os.path.dirname(__file__), 'validators')
for _, module_name, is_package in pkgutil.iter_modules([validators_dir]):
if not is_package:
module = importlib.import_module('commit_validation.validators.' + module_name)
validator = module.get_validator()
if ignore_validators and validator.__name__ in ignore_validators:
print(f"Disabled validation for '{validator.__name__}'")
else:
validator_classes.append(validator)
error_summary = {}
# Process validators
for validator_class in validator_classes:
validator = validator_class()
validator_name = validator.__class__.__name__
error_list = []
passed = validator.run(commit, errors = error_list)
if passed:
passed_count += 1
print(f'{validator.__class__.__name__} PASSED')
else:
failed_count += 1
print(f'{validator.__class__.__name__} FAILED')
error_summary[validator_name] = error_list
end_time = time.time()
if failed_count:
print("VALIDATION FAILURE SUMMARY")
for val_name in error_summary.keys():
errors = error_summary[val_name]
if errors:
for error_message in errors:
first_line = True
for line in error_message.splitlines():
if first_line:
first_line = False
print(f'VALIDATOR_FAILED: {val_name} {line}')
else:
print(f' {line}') # extra detail lines do not need machine parsing
stats_strs = []
if failed_count > 0:
stats_strs.append(f'{failed_count} failed')
if passed_count > 0:
stats_strs.append(f'{passed_count} passed')
stats_str = ', '.join(stats_strs) + f' in {end_time - start_time:.2f}s'
print()
print(stats_str)
return failed_count == 0
def IsFileSkipped(file_name) -> bool:
if os.path.splitext(file_name)[1].lower() not in SOURCE_AND_SCRIPT_FILE_EXTENSIONS:
skipped = True
for pattern in SOURCE_AND_SCRIPT_FILE_PATTERNS:
if pattern.match(file_name):
skipped = False
break
return skipped
return False
class CommitValidator(abc.ABC):
"""A commit validator"""
@abc.abstractmethod
def run(self, commit: Commit, errors: List[str]) -> bool:
"""Validates a commit
:param commit: The commit to validate
:param errors: List of errors generated, append them to this list
:return: True if the commit is valid, and False otherwise
"""
pass
SOURCE_FILE_EXTENSIONS: Tuple[str, ...] = (
'.c', '.cc', '.cpp', '.cxx', '.h', '.hpp', '.hxx', '.inl', '.m', '.mm', '.cs', '.java'
)
"""File extensions for compiled source code"""
SCRIPT_FILE_EXTENSIONS: Tuple[str, ...] = (
'.py', '.lua', '.bat', '.cmd', '.sh', '.js'
)
"""File extensions for interpreted code"""
BUILD_FILE_EXTENSIONS: Tuple[str, ...] = (
'.cmake',
)
"""File extensions for build files"""
SOURCE_AND_SCRIPT_FILE_EXTENSIONS: Tuple[str, ...] = SOURCE_FILE_EXTENSIONS + SCRIPT_FILE_EXTENSIONS + BUILD_FILE_EXTENSIONS
"""File extensions for both compiled and interpreted code"""
BUILD_FILE_PATTERNS: Tuple[re.Pattern, ...] = (
re.compile(r'.*CMakeLists\.txt'),
re.compile(r'.*Jenkinsfile')
)
"""File patterns for build files"""
SOURCE_AND_SCRIPT_FILE_PATTERNS: Tuple[re.Pattern, ...] = BUILD_FILE_PATTERNS
EXCLUDED_VALIDATION_PATTERNS = [
'*/.git/*',
'*/3rdParty/*',
'*/__pycache__/*',
'*/External/*',
'build',
'Cache',
'*/Code/Framework/AzCore/azgnmx/azgnmx/*',
'Code/Tools/CryFXC',
'Code/Tools/HLSLCrossCompiler',
'Code/Tools/HLSLCrossCompilerMETAL',
'Docs',
'python/runtime',
'restricted/*/Tools/*RemoteControl',
'Tools/3dsmax',
'*/user/Cache/*',
'*/user/log/*',
]
| [((62, 17, 62, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((67, 38, 67, 76), 'pkgutil.iter_modules', 'pkgutil.iter_modules', ({(67, 59, 67, 75): '[validators_dir]'}, {}), '([validators_dir])', False, 'import pkgutil\n'), ((93, 15, 93, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((164, 4, 164, 36), 're.compile', 're.compile', ({(164, 15, 164, 35): '""".*CMakeLists\\\\.txt"""'}, {}), "('.*CMakeLists\\\\.txt')", False, 'import re\n'), ((165, 4, 165, 32), 're.compile', 're.compile', ({(165, 15, 165, 31): '""".*Jenkinsfile"""'}, {}), "('.*Jenkinsfile')", False, 'import re\n'), ((66, 34, 66, 59), 'os.path.dirname', 'os.path.dirname', ({(66, 50, 66, 58): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((69, 21, 69, 91), 'importlib.import_module', 'importlib.import_module', ({(69, 45, 69, 90): "'commit_validation.validators.' + module_name"}, {}), "('commit_validation.validators.' + module_name)", False, 'import importlib\n'), ((122, 7, 122, 34), 'os.path.splitext', 'os.path.splitext', ({(122, 24, 122, 33): 'file_name'}, {}), '(file_name)', False, 'import os\n')] |
KSaiRahul21/matrixprofile | matrixprofile/algorithms/snippets.py | d8250e30d90ed0453bb7c35bb34ab0c04ae7b334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mpdist import mpdist_vector
def snippets(ts, snippet_size, num_snippets=2, window_size=None):
"""
The snippets algorithm is used to summarize your time series by
    identifying N representative subsequences. If you want to
identify typical patterns in your time series, then this is the algorithm
to use.
Parameters
----------
ts : array_like
The time series.
snippet_size : int
The size of snippet desired.
num_snippets : int, Default 2
The number of snippets you would like to find.
window_size : int, Default (snippet_size / 2)
The window size.
Returns
-------
list : snippets
A list of snippets as dictionary objects with the following structure.
>>> {
>>> fraction: fraction of the snippet,
>>> index: the index of the snippet,
>>> snippet: the snippet values
>>> }
"""
ts = core.to_np_array(ts).astype('d')
n = len(ts)
if not isinstance(snippet_size, int) or snippet_size < 4:
raise ValueError('snippet_size must be an integer >= 4')
if n < (2 * snippet_size):
raise ValueError('Time series is too short relative to snippet length')
if not window_size:
window_size = int(np.floor(snippet_size / 2))
if window_size >= snippet_size:
raise ValueError('window_size must be smaller than snippet_size')
# pad end of time series with zeros
num_zeros = int(snippet_size * np.ceil(n / snippet_size) - n)
ts = np.append(ts, np.zeros(num_zeros))
# compute all profiles
indices = np.arange(0, len(ts) - snippet_size, snippet_size)
distances = []
for j, i in enumerate(indices):
distance = mpdist_vector(ts, ts[i:(i + snippet_size - 1)], int(window_size))
distances.append(distance)
distances = np.array(distances)
# find N snippets
snippets = []
minis = np.inf
total_min = None
for n in range(num_snippets):
minims = np.inf
for i in range(len(indices)):
s = np.sum(np.minimum(distances[i, :], minis))
if minims > s:
minims = s
index = i
minis = np.minimum(distances[index, :], minis)
actual_index = indices[index]
snippet = ts[actual_index:actual_index + snippet_size]
snippet_distance = distances[index]
snippets.append({
'index': actual_index,
'snippet': snippet,
'distance': snippet_distance
})
if isinstance(total_min, type(None)):
total_min = snippet_distance
else:
total_min = np.minimum(total_min, snippet_distance)
# compute the fraction of each snippet
for snippet in snippets:
mask = (snippet['distance'] <= total_min)
snippet['fraction'] = mask.sum() / (len(ts) - snippet_size)
total_min = total_min - mask
del snippet['distance']
return snippets
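# Minimal usage sketch (illustrative; the synthetic series and sizes are arbitrary choices,
# not defaults of the library):
if __name__ == '__main__':
    demo_ts = np.sin(np.linspace(0, 40 * np.pi, 2000)) + 0.1 * np.random.rand(2000)
    for s in snippets(demo_ts, snippet_size=200, num_snippets=2):
        print(s['index'], s['fraction'], len(s['snippet']))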
| [((74, 16, 74, 35), 'numpy.array', 'np.array', ({(74, 25, 74, 34): 'distances'}, {}), '(distances)', True, 'import numpy as np\n'), ((64, 23, 64, 42), 'numpy.zeros', 'np.zeros', ({(64, 32, 64, 41): 'num_zeros'}, {}), '(num_zeros)', True, 'import numpy as np\n'), ((90, 16, 90, 54), 'numpy.minimum', 'np.minimum', ({(90, 27, 90, 46): 'distances[(index), :]', (90, 48, 90, 53): 'minis'}, {}), '(distances[(index), :], minis)', True, 'import numpy as np\n'), ((47, 9, 47, 29), 'matrixprofile.core.to_np_array', 'core.to_np_array', ({(47, 26, 47, 28): 'ts'}, {}), '(ts)', False, 'from matrixprofile import core\n'), ((57, 26, 57, 52), 'numpy.floor', 'np.floor', ({(57, 35, 57, 51): 'snippet_size / 2'}, {}), '(snippet_size / 2)', True, 'import numpy as np\n'), ((103, 24, 103, 63), 'numpy.minimum', 'np.minimum', ({(103, 35, 103, 44): 'total_min', (103, 46, 103, 62): 'snippet_distance'}, {}), '(total_min, snippet_distance)', True, 'import numpy as np\n'), ((63, 35, 63, 60), 'numpy.ceil', 'np.ceil', ({(63, 43, 63, 59): 'n / snippet_size'}, {}), '(n / snippet_size)', True, 'import numpy as np\n'), ((84, 23, 84, 57), 'numpy.minimum', 'np.minimum', ({(84, 34, 84, 49): 'distances[(i), :]', (84, 51, 84, 56): 'minis'}, {}), '(distances[(i), :], minis)', True, 'import numpy as np\n')] |
yk/jina | jina/logging/formatter.py | ab66e233e74b956390f266881ff5dc4e0110d3ff | import json
import re
from copy import copy
from logging import Formatter
from .profile import used_memory
from ..helper import colored
class ColorFormatter(Formatter):
"""Format the log into colored logs based on the log-level. """
MAPPING = {
'DEBUG': dict(color='white', on_color=None), # white
        'INFO': dict(color='white', on_color=None),  # white
'WARNING': dict(color='yellow', on_color='on_grey'), # yellow
'ERROR': dict(color='red', on_color=None), # 31 for red
'CRITICAL': dict(color='white', on_color='on_red'), # white on red bg
        'SUCCESS': dict(color='green', on_color=None),  # green
} #: log-level to color mapping
def format(self, record):
cr = copy(record)
seq = self.MAPPING.get(cr.levelname, self.MAPPING['INFO']) # default white
cr.msg = colored(cr.msg, **seq)
return super().format(cr)
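# Usage sketch (illustrative only; the format string below is an assumption, not Jina's
# actual default template):
#   import logging
#   handler = logging.StreamHandler()
#   handler.setFormatter(ColorFormatter('%(asctime)s %(levelname)s %(message)s'))
#   logger = logging.getLogger('demo')
#   logger.addHandler(handler)
#   logger.warning('this line is colored according to its level')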
class PlainFormatter(Formatter):
"""Remove all control chars from the log and format it as plain text
Also restrict the max-length of msg to 512
"""
def format(self, record):
cr = copy(record)
if isinstance(cr.msg, str):
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))[:512]
return super().format(cr)
class JsonFormatter(Formatter):
"""Format the log message as a JSON object so that it can be later used/parsed in browser with javascript. """
KEYS = {'created', 'filename', 'funcName', 'levelname', 'lineno', 'msg',
'module', 'name', 'pathname', 'process', 'thread', 'processName',
'threadName', 'log_id'} #: keys to extract from the log
def format(self, record):
cr = copy(record)
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))
return json.dumps(
{k: getattr(cr, k) for k in self.KEYS if hasattr(cr, k)},
sort_keys=True)
class ProfileFormatter(Formatter):
"""Format the log message as JSON object and add the current used memory into it"""
def format(self, record):
cr = copy(record)
if isinstance(cr.msg, dict):
cr.msg.update({k: getattr(cr, k) for k in ['created', 'module', 'process', 'thread']})
cr.msg['memory'] = used_memory(unit=1)
return json.dumps(cr.msg, sort_keys=True)
else:
return ''
| [((23, 13, 23, 25), 'copy.copy', 'copy', ({(23, 18, 23, 24): 'record'}, {}), '(record)', False, 'from copy import copy\n'), ((36, 13, 36, 25), 'copy.copy', 'copy', ({(36, 18, 36, 24): 'record'}, {}), '(record)', False, 'from copy import copy\n'), ((50, 13, 50, 25), 'copy.copy', 'copy', ({(50, 18, 50, 24): 'record'}, {}), '(record)', False, 'from copy import copy\n'), ((61, 13, 61, 25), 'copy.copy', 'copy', ({(61, 18, 61, 24): 'record'}, {}), '(record)', False, 'from copy import copy\n'), ((65, 19, 65, 53), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')] |
sugitanishi/competitive-programming | atcoder/abc191/b.py | 51af65fdce514ece12f8afbf142b809d63eefb5d | import sys
sys.setrecursionlimit(10000000)
input=lambda : sys.stdin.readline().rstrip()
n,x=map(int,input().split())
a=list(map(int,input().split()))
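# keep every element of a except those equal to x (original order preserved)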
aa=list(filter(lambda b:b!=x,a))
print(*aa) | [((2, 0, 2, 31), 'sys.setrecursionlimit', 'sys.setrecursionlimit', ({(2, 22, 2, 30): '(10000000)'}, {}), '(10000000)', False, 'import sys\n'), ((3, 15, 3, 35), 'sys.stdin.readline', 'sys.stdin.readline', ({}, {}), '()', False, 'import sys\n')] |
cfogg/python-client | tests/integration/test_streaming_e2e.py | 40e6891c8240e6b2acd5df538e622e9f15de43d6 | """Streaming integration tests."""
# pylint:disable=no-self-use,invalid-name,too-many-arguments,too-few-public-methods,line-too-long
# pylint:disable=too-many-statements,too-many-locals,too-many-lines
import threading
import time
import json
from queue import Queue
from splitio.client.factory import get_factory
from tests.helpers.mockserver import SSEMockServer, SplitMockServer
try: # try to import python3 names. fallback to python2
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
class StreamingIntegrationTests(object):
"""Test streaming operation and failover."""
def test_happiness(self):
"""Test initialization & splits/segment updates."""
auth_server_response = {
'pushEnabled': True,
'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.'
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO'
'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'on', 'user', True)]
},
1: {
'since': 1,
'till': 1,
'splits': []
}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
assert factory.client().get_treatment('maldo', 'split1') == 'on'
time.sleep(1)
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_split_change_event(2))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_split_with_segment('split2', 2, True, False,
'off', 'user', 'off', 'segment1')]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
segment_changes[('segment1', -1)] = {
'name': 'segment1',
'added': ['maldo'],
'removed': [],
'since': -1,
'till': 1
}
segment_changes[('segment1', 1)] = {'name': 'segment1', 'added': [],
'removed': [], 'since': 1, 'till': 1}
sse_server.publish(make_split_change_event(3))
time.sleep(1)
sse_server.publish(make_segment_change_event('segment1', 1))
time.sleep(1)
assert factory.client().get_treatment('pindon', 'split2') == 'off'
assert factory.client().get_treatment('maldo', 'split2') == 'on'
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after first notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after second notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Segment change notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/segment1?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until segment1 since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/segment1?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_occupancy_flicker(self):
"""Test that changes in occupancy switch between polling & streaming properly."""
auth_server_response = {
'pushEnabled': True,
'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.'
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO'
'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
# After dropping occupancy, the sdk should switch to polling
# and perform a syncAll that gets this change
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_occupancy('control_pri', 0))
sse_server.publish(make_occupancy('control_sec', 0))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert task.running()
        # We make another change in the BE and don't send the event.
# We restore occupancy, and it should be fetched by the
# sync all after streaming is restored.
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_occupancy('control_pri', 1))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Now we make another change and send an event so it's propagated
split_changes[3] = {
'since': 3,
'till': 4,
'splits': [make_simple_split('split1', 4, True, False, 'off', 'user', False)]
}
split_changes[4] = {'since': 4, 'till': 4, 'splits': []}
sse_server.publish(make_split_change_event(4))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
# Kill the split
split_changes[4] = {
'since': 4,
'till': 5,
'splits': [make_simple_split('split1', 5, True, True, 'frula', 'user', False)]
}
split_changes[5] = {'since': 5, 'till': 5, 'splits': []}
sse_server.publish(make_split_kill_event('split1', 'frula', 5))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'frula'
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after first notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after second notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Split kill
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=5'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_start_without_occupancy(self):
"""Test an SDK starting with occupancy on 0 and switching to streamin afterwards."""
auth_server_response = {
'pushEnabled': True,
'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.'
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO'
'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 0))
sse_server.publish(make_occupancy('control_sec', 0))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
        # After restoring occupancy, the sdk should switch back to streaming
# and perform a syncAll that gets this change
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_occupancy('control_sec', 1))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert not task.running()
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push down
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push restored
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Second iteration of previous syncAll
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_streaming_status_changes(self):
"""Test changes between streaming enabled, paused and disabled."""
auth_server_response = {
'pushEnabled': True,
'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.'
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO'
'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
        # After receiving STREAMING_PAUSED, the sdk should switch to polling
# and perform a syncAll that gets this change
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_control_event('STREAMING_PAUSED', 1))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert task.running()
        # We make another change in the BE and don't send the event.
        # Once STREAMING_ENABLED is received, the change should be picked up
        # by the syncAll performed when streaming is restored.
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_control_event('STREAMING_ENABLED', 2))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Now we make another change and send an event so it's propagated
split_changes[3] = {
'since': 3,
'till': 4,
'splits': [make_simple_split('split1', 4, True, False, 'off', 'user', False)]
}
split_changes[4] = {'since': 4, 'till': 4, 'splits': []}
sse_server.publish(make_split_change_event(4))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert not task.running()
split_changes[4] = {
'since': 4,
'till': 5,
'splits': [make_simple_split('split1', 5, True, False, 'off', 'user', True)]
}
split_changes[5] = {'since': 5, 'till': 5, 'splits': []}
sse_server.publish(make_control_event('STREAMING_DISABLED', 2))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert task.running()
assert 'PushStatusHandler' not in [t.name for t in threading.enumerate()]
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll on push down
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push is up
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming disabled
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=5'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_server_closes_connection(self):
"""Test that if the server closes the connection, the whole flow is retried with BO."""
auth_server_response = {
'pushEnabled': True,
'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.'
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO'
'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'on', 'user', True)]
},
1: {
'since': 1,
'till': 1,
'splits': []
}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 100,
'segmentsRefreshRate': 100, 'metricsRefreshRate': 100,
'impressionsRefreshRate': 100, 'eventsPushRate': 100}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
assert factory.client().get_treatment('maldo', 'split1') == 'on'
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
time.sleep(1)
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_split_change_event(2))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
sse_server.publish(SSEMockServer.GRACEFUL_REQUEST_END)
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert task.running()
time.sleep(2) # wait for the backoff to expire so streaming gets re-attached
# re-send initial event AND occupancy
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
time.sleep(2)
assert not task.running()
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_split_change_event(3))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Validate the SSE requests
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after first notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll on retryable error handling
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth after connection breaks
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected again
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after new notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_ably_errors_handling(self):
"""Test incoming ably errors and validate its handling."""
import logging
logger = logging.getLogger('splitio')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
auth_server_response = {
'pushEnabled': True,
'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.'
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO'
'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
        # We'll send an ignorable error and check that nothing has happened
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_ably_error_event(60000, 600))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
sse_server.publish(make_ably_error_event(40145, 401))
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
time.sleep(3)
assert task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'off'
# Re-publish initial events so that the retry succeeds
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
time.sleep(3)
assert not task.running()
# Assert streaming is working properly
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_split_change_event(3))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Send a non-retryable ably error
sse_server.publish(make_ably_error_event(40200, 402))
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
time.sleep(3)
# Assert sync-task is running and the streaming status handler thread is over
assert task.running()
assert 'PushStatusHandler' not in [t.name for t in threading.enumerate()]
# Validate the SSE requests
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
        sse_request = sse_requests.get()
        assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll retriable error
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth again
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push is up
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after non recoverable ably error
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def make_split_change_event(change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'data': json.dumps({
'type': 'SPLIT_UPDATE',
'changeNumber': change_number
})
})
}
def make_split_kill_event(name, default_treatment, change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'data': json.dumps({
'type': 'SPLIT_KILL',
'splitName': name,
'defaultTreatment': default_treatment,
'changeNumber': change_number
})
})
}
def make_initial_event():
"""Make a split change event."""
return {'id':'TVUsxaabHs:0:0'}
def make_occupancy(channel, publishers):
"""Make an occupancy event."""
return {
'event': 'message',
'data': json.dumps({
'id':'aP6EuhrcUm:0:0',
'timestamp':1604325712734,
'encoding': 'json',
'channel': "[?occupancy=metrics.publishers]%s" % channel,
'data': json.dumps({'metrics': {'publishers': publishers}}),
'name':'[meta]occupancy'
})
}
def make_segment_change_event(name, change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'data': json.dumps({
'type': 'SEGMENT_UPDATE',
'segmentName': name,
'changeNumber': change_number
})
})
}
def make_control_event(control_type, timestamp):
"""Make a control event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': timestamp,
'encoding':'json',
'channel':'[?occupancy=metrics.publishers]control_pri',
'data': json.dumps({
'type': 'CONTROL',
'controlType': control_type,
})
})
}
def make_ably_error_event(code, status):
"""Make a control event."""
return {
'event': 'error',
'data': json.dumps({
'message':'Invalid accessToken in request: sarasa',
'code': code,
'statusCode': status,
'href':"https://help.ably.io/error/%d" % code
})
}
def make_simple_split(name, cn, active, killed, default_treatment, tt, on):
"""Make a simple split."""
return {
'trafficTypeName': tt,
'name': name,
'seed': 1699838640,
'status': 'ACTIVE' if active else 'ARCHIVED',
'changeNumber': cn,
'killed': killed,
'defaultTreatment': default_treatment,
'conditions': [
{
'matcherGroup': {
'combiner': 'AND',
'matchers': [
{
'matcherType': 'ALL_KEYS',
'negate': False,
'userDefinedSegmentMatcherData': None,
'whitelistMatcherData': None
}
]
},
'partitions': [
{'treatment': 'on' if on else 'off', 'size': 100},
{'treatment': 'off' if on else 'on', 'size': 0}
]
}
]
}
def make_split_with_segment(name, cn, active, killed, default_treatment,
tt, on, segment):
"""Make a split with a segment."""
return {
'trafficTypeName': tt,
'name': name,
'seed': cn,
'status': 'ACTIVE' if active else 'ARCHIVED',
'changeNumber': cn,
'killed': killed,
'defaultTreatment': default_treatment,
'configurations': {
'on': '{\'size\':15,\'test\':20}'
},
'conditions': [
{
'matcherGroup': {
'combiner': 'AND',
'matchers': [
{
'matcherType': 'IN_SEGMENT',
'negate': False,
'userDefinedSegmentMatcherData': {'segmentName': segment},
'whitelistMatcherData': None
}
]
},
'partitions': [{
'treatment': 'on' if on else 'off',
'size': 100
}]
}
]
}
| [((49, 33, 49, 40), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((50, 24, 51, 61), 'tests.helpers.mockserver.SplitMockServer', 'SplitMockServer', ({(50, 40, 50, 53): 'split_changes', (50, 55, 50, 70): 'segment_changes', (50, 72, 50, 94): 'split_backend_requests', (51, 40, 51, 60): 'auth_server_response'}, {}), '(split_changes, segment_changes, split_backend_requests,\n auth_server_response)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((52, 23, 52, 30), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((53, 21, 53, 48), 'tests.helpers.mockserver.SSEMockServer', 'SSEMockServer', ({(53, 35, 53, 47): 'sse_requests'}, {}), '(sse_requests)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((69, 18, 69, 54), 'splitio.client.factory.get_factory', 'get_factory', ({(69, 30, 69, 43): '"""some_apikey"""'}, {}), "('some_apikey', **kwargs)", False, 'from splitio.client.factory import get_factory\n'), ((74, 8, 74, 21), 'time.sleep', 'time.sleep', ({(74, 19, 74, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((82, 8, 82, 21), 'time.sleep', 'time.sleep', ({(82, 19, 82, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((103, 8, 103, 21), 'time.sleep', 'time.sleep', ({(103, 19, 103, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((105, 8, 105, 21), 'time.sleep', 'time.sleep', ({(105, 19, 105, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((115, 13, 115, 25), 'urlparse.parse_qs', 'parse_qs', ({(115, 22, 115, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((201, 24, 201, 41), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((233, 33, 233, 40), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((234, 24, 235, 61), 'tests.helpers.mockserver.SplitMockServer', 'SplitMockServer', ({(234, 40, 234, 53): 'split_changes', (234, 55, 234, 70): 'segment_changes', (234, 72, 234, 94): 'split_backend_requests', (235, 40, 235, 60): 'auth_server_response'}, {}), '(split_changes, segment_changes, split_backend_requests,\n auth_server_response)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((236, 23, 236, 30), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((237, 21, 237, 48), 'tests.helpers.mockserver.SSEMockServer', 'SSEMockServer', ({(237, 35, 237, 47): 'sse_requests'}, {}), '(sse_requests)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((253, 18, 253, 54), 'splitio.client.factory.get_factory', 'get_factory', ({(253, 30, 253, 43): '"""some_apikey"""'}, {}), "('some_apikey', **kwargs)", False, 'from splitio.client.factory import get_factory\n'), ((256, 8, 256, 21), 'time.sleep', 'time.sleep', ({(256, 19, 256, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((276, 8, 276, 21), 'time.sleep', 'time.sleep', ({(276, 19, 276, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((291, 8, 291, 21), 'time.sleep', 'time.sleep', ({(291, 19, 291, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((303, 8, 303, 21), 'time.sleep', 'time.sleep', ({(303, 19, 303, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((314, 8, 314, 21), 'time.sleep', 'time.sleep', ({(314, 19, 314, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((322, 13, 322, 25), 'urlparse.parse_qs', 'parse_qs', ({(322, 22, 322, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((420, 24, 420, 41), 'threading.Event', 
'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((452, 33, 452, 40), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((453, 24, 454, 61), 'tests.helpers.mockserver.SplitMockServer', 'SplitMockServer', ({(453, 40, 453, 53): 'split_changes', (453, 55, 453, 70): 'segment_changes', (453, 72, 453, 94): 'split_backend_requests', (454, 40, 454, 60): 'auth_server_response'}, {}), '(split_changes, segment_changes, split_backend_requests,\n auth_server_response)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((455, 23, 455, 30), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((456, 21, 456, 48), 'tests.helpers.mockserver.SSEMockServer', 'SSEMockServer', ({(456, 35, 456, 47): 'sse_requests'}, {}), '(sse_requests)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((472, 18, 472, 54), 'splitio.client.factory.get_factory', 'get_factory', ({(472, 30, 472, 43): '"""some_apikey"""'}, {}), "('some_apikey', **kwargs)", False, 'from splitio.client.factory import get_factory\n'), ((475, 8, 475, 21), 'time.sleep', 'time.sleep', ({(475, 19, 475, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((493, 8, 493, 21), 'time.sleep', 'time.sleep', ({(493, 19, 493, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((502, 13, 502, 25), 'urlparse.parse_qs', 'parse_qs', ({(502, 22, 502, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((570, 24, 570, 41), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((602, 33, 602, 40), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((603, 24, 604, 61), 'tests.helpers.mockserver.SplitMockServer', 'SplitMockServer', ({(603, 40, 603, 53): 'split_changes', (603, 55, 603, 70): 'segment_changes', (603, 72, 603, 94): 'split_backend_requests', (604, 40, 604, 60): 'auth_server_response'}, {}), '(split_changes, segment_changes, split_backend_requests,\n auth_server_response)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((605, 23, 605, 30), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((606, 21, 606, 48), 'tests.helpers.mockserver.SSEMockServer', 'SSEMockServer', ({(606, 35, 606, 47): 'sse_requests'}, {}), '(sse_requests)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((622, 18, 622, 54), 'splitio.client.factory.get_factory', 'get_factory', ({(622, 30, 622, 43): '"""some_apikey"""'}, {}), "('some_apikey', **kwargs)", False, 'from splitio.client.factory import get_factory\n'), ((625, 8, 625, 21), 'time.sleep', 'time.sleep', ({(625, 19, 625, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((644, 8, 644, 21), 'time.sleep', 'time.sleep', ({(644, 19, 644, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((659, 8, 659, 21), 'time.sleep', 'time.sleep', ({(659, 19, 659, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((671, 8, 671, 21), 'time.sleep', 'time.sleep', ({(671, 19, 671, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((682, 8, 682, 21), 'time.sleep', 'time.sleep', ({(682, 19, 682, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((692, 13, 692, 25), 'urlparse.parse_qs', 'parse_qs', ({(692, 22, 692, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((790, 24, 790, 41), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((826, 33, 826, 40), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import 
Queue\n'), ((827, 24, 828, 61), 'tests.helpers.mockserver.SplitMockServer', 'SplitMockServer', ({(827, 40, 827, 53): 'split_changes', (827, 55, 827, 70): 'segment_changes', (827, 72, 827, 94): 'split_backend_requests', (828, 40, 828, 60): 'auth_server_response'}, {}), '(split_changes, segment_changes, split_backend_requests,\n auth_server_response)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((829, 23, 829, 30), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((830, 21, 830, 48), 'tests.helpers.mockserver.SSEMockServer', 'SSEMockServer', ({(830, 35, 830, 47): 'sse_requests'}, {}), '(sse_requests)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((848, 18, 848, 54), 'splitio.client.factory.get_factory', 'get_factory', ({(848, 30, 848, 43): '"""some_apikey"""'}, {}), "('some_apikey', **kwargs)", False, 'from splitio.client.factory import get_factory\n'), ((855, 8, 855, 21), 'time.sleep', 'time.sleep', ({(855, 19, 855, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((863, 8, 863, 21), 'time.sleep', 'time.sleep', ({(863, 19, 863, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((867, 8, 867, 21), 'time.sleep', 'time.sleep', ({(867, 19, 867, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((871, 8, 871, 21), 'time.sleep', 'time.sleep', ({(871, 19, 871, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((877, 8, 877, 21), 'time.sleep', 'time.sleep', ({(877, 19, 877, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((887, 8, 887, 21), 'time.sleep', 'time.sleep', ({(887, 19, 887, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((896, 13, 896, 25), 'urlparse.parse_qs', 'parse_qs', ({(896, 22, 896, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((919, 13, 919, 25), 'urlparse.parse_qs', 'parse_qs', ({(919, 22, 919, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((1011, 24, 1011, 41), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((1021, 17, 1021, 45), 'logging.getLogger', 'logging.getLogger', ({(1021, 35, 1021, 44): '"""splitio"""'}, {}), "('splitio')", False, 'import logging\n'), ((1022, 18, 1022, 41), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n'), ((1023, 20, 1023, 92), 'logging.Formatter', 'logging.Formatter', ({(1023, 38, 1023, 91): '"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""'}, {}), "('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')", False, 'import logging\n'), ((1050, 33, 1050, 40), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((1051, 24, 1052, 61), 'tests.helpers.mockserver.SplitMockServer', 'SplitMockServer', ({(1051, 40, 1051, 53): 'split_changes', (1051, 55, 1051, 70): 'segment_changes', (1051, 72, 1051, 94): 'split_backend_requests', (1052, 40, 1052, 60): 'auth_server_response'}, {}), '(split_changes, segment_changes, split_backend_requests,\n auth_server_response)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((1053, 23, 1053, 30), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((1054, 21, 1054, 48), 'tests.helpers.mockserver.SSEMockServer', 'SSEMockServer', ({(1054, 35, 1054, 47): 'sse_requests'}, {}), '(sse_requests)', False, 'from tests.helpers.mockserver import SSEMockServer, SplitMockServer\n'), ((1070, 18, 1070, 54), 'splitio.client.factory.get_factory', 'get_factory', ({(1070, 30, 1070, 43): '"""some_apikey"""'}, {}), 
"('some_apikey', **kwargs)", False, 'from splitio.client.factory import get_factory\n'), ((1073, 8, 1073, 21), 'time.sleep', 'time.sleep', ({(1073, 19, 1073, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((1091, 8, 1091, 21), 'time.sleep', 'time.sleep', ({(1091, 19, 1091, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((1097, 8, 1097, 21), 'time.sleep', 'time.sleep', ({(1097, 19, 1097, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((1105, 8, 1105, 21), 'time.sleep', 'time.sleep', ({(1105, 19, 1105, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((1116, 8, 1116, 21), 'time.sleep', 'time.sleep', ({(1116, 19, 1116, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((1123, 8, 1123, 21), 'time.sleep', 'time.sleep', ({(1123, 19, 1123, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((1134, 13, 1134, 25), 'urlparse.parse_qs', 'parse_qs', ({(1134, 22, 1134, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((1156, 13, 1156, 25), 'urlparse.parse_qs', 'parse_qs', ({(1156, 22, 1156, 24): 'qs'}, {}), '(qs)', False, 'from urlparse import parse_qs\n'), ((1248, 24, 1248, 41), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((1349, 16, 1354, 10), 'json.dumps', 'json.dumps', ({(1349, 27, 1354, 9): "{'message': 'Invalid accessToken in request: sarasa', 'code': code,\n 'statusCode': status, 'href': 'https://help.ably.io/error/%d' % code}"}, {}), "({'message': 'Invalid accessToken in request: sarasa', 'code':\n code, 'statusCode': status, 'href': 'https://help.ably.io/error/%d' % code}\n )", False, 'import json\n'), ((1266, 20, 1269, 14), 'json.dumps', 'json.dumps', ({(1266, 31, 1269, 13): "{'type': 'SPLIT_UPDATE', 'changeNumber': change_number}"}, {}), "({'type': 'SPLIT_UPDATE', 'changeNumber': change_number})", False, 'import json\n'), ((1283, 20, 1288, 14), 'json.dumps', 'json.dumps', ({(1283, 31, 1288, 13): "{'type': 'SPLIT_KILL', 'splitName': name, 'defaultTreatment':\n default_treatment, 'changeNumber': change_number}"}, {}), "({'type': 'SPLIT_KILL', 'splitName': name, 'defaultTreatment':\n default_treatment, 'changeNumber': change_number})", False, 'import json\n'), ((1305, 20, 1305, 71), 'json.dumps', 'json.dumps', ({(1305, 31, 1305, 70): "{'metrics': {'publishers': publishers}}"}, {}), "({'metrics': {'publishers': publishers}})", False, 'import json\n'), ((1320, 20, 1324, 14), 'json.dumps', 'json.dumps', ({(1320, 31, 1324, 13): "{'type': 'SEGMENT_UPDATE', 'segmentName': name, 'changeNumber': change_number}"}, {}), "({'type': 'SEGMENT_UPDATE', 'segmentName': name, 'changeNumber':\n change_number})", False, 'import json\n'), ((1338, 20, 1341, 14), 'json.dumps', 'json.dumps', ({(1338, 31, 1341, 13): "{'type': 'CONTROL', 'controlType': control_type}"}, {}), "({'type': 'CONTROL', 'controlType': control_type})", False, 'import json\n'), ((685, 59, 685, 80), 'threading.enumerate', 'threading.enumerate', ({}, {}), '()', False, 'import threading\n'), ((1127, 59, 1127, 80), 'threading.enumerate', 'threading.enumerate', ({}, {}), '()', False, 'import threading\n')] |
weezel/BandEventNotifier | venues/abstract_venue.py | 55824ba26aba9882f46d1770ec5df592a5dc32bc | import re
from abc import ABC, abstractmethod
from typing import Any, Dict, Generator
class IncorrectVenueImplementation(Exception):
pass
# class AbstractVenue(metaclass=ABC):
class AbstractVenue(ABC):
def __init__(self):
self.url = ""
self.name = ""
self.city = ""
self.country = ""
self.pricepat_monetary = re.compile("[0-9.,]+.€")
self.pricepat_plain = re.compile("[0-9.,]+")
def get_venue_name(self) -> str:
return self.name
def get_city(self) -> str:
return self.city
def get_country(self) -> str:
return self.country
def event_sqlentity(self) -> Dict[str, str]:
return {"name": self.name,
"city": self.city,
"country": self.country}
def parse_price(self, info_tag: str) -> str:
prices_with_mon = self.pricepat_monetary.findall(info_tag)
prices = []
for price in prices_with_mon:
parsed_price = self.pricepat_plain.findall(price)
if len(parsed_price) == 0:
continue
prices.append("".join(parsed_price))
if len(prices) == 0:
return "0€"
elif len(prices) == 2:
in_advance, from_door = prices[0], prices[1]
return f"{in_advance}€/{from_door}€"
return "{}€".format("".join(prices))
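    # Example of the intended behaviour (a sketch, not from the original
    # source): an info string like "Liput 15€/18€" yields "15€/18€"
    # (advance price / price at the door), a single "15€" yields "15€",
    # and a string with no recognizable price yields "0€".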
# FIXME Proper class type checking
def __eq__(self, other):
return hasattr(other, "url") \
and other.url == self.url
@abstractmethod
def parse_events(self, data: Any) \
-> Generator[Dict[str, Any], None, None]:
pass
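# A concrete venue is expected to subclass AbstractVenue, fill in url/name/
# city/country and implement parse_events(). Hypothetical sketch (the field
# names in the yielded dict are illustrative only, not taken from this repo):
#
#     class ExampleVenue(AbstractVenue):
#         def __init__(self):
#             super().__init__()
#             self.url = "https://example.org/events"
#             self.name = "Example Venue"
#             self.city = "Helsinki"
#             self.country = "Finland"
#
#         def parse_events(self, data):
#             for event in data:
#                 yield {"venue": self.get_venue_name(),
#                        "price": self.parse_price(event.get("info", ""))}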
| [((18, 33, 18, 59), 're.compile', 're.compile', ({(18, 44, 18, 58): '"""[0-9.,]+.€"""'}, {}), "('[0-9.,]+.€')", False, 'import re\n'), ((19, 30, 19, 52), 're.compile', 're.compile', ({(19, 41, 19, 51): '"""[0-9.,]+"""'}, {}), "('[0-9.,]+')", False, 'import re\n')] |
vandurme/TFMTL | mtl/util/pipeline.py | 5958187900bdf67089a237c523b6caa899f63ac1 | # Copyright 2018 Johns Hopkins University. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import tensorflow as tf
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import parsing_ops
class Pipeline(object):
def __init__(self, tfrecord_file, feature_map, batch_size=32,
num_threads=4, prefetch_buffer_size=1,
static_max_length=None, shuffle_buffer_size=10000,
shuffle=True, num_epochs=None, one_shot=False):
self._feature_map = feature_map
self._batch_size = batch_size
self._static_max_length = static_max_length
# Initialize the dataset
dataset = tf.data.TFRecordDataset(tfrecord_file)
# Maybe randomize
if shuffle:
dataset = dataset.shuffle(shuffle_buffer_size)
# Maybe repeat
if num_epochs is None:
dataset = dataset.repeat() # repeat indefinitely
elif num_epochs > 1:
dataset = dataset.repeat(count=num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(self.parse_example,
num_parallel_calls=num_threads)
# Pre-fetch a batch for faster processing
dataset = dataset.prefetch(prefetch_buffer_size)
# Get the iterator
if one_shot:
self._iterator = dataset.make_one_shot_iterator()
else:
self._iterator = dataset.make_initializable_iterator()
self._init_op = self._iterator.initializer
# Get outputs
self._outputs = self._iterator.get_next()
# Map to features
index = 0
result = {}
for key in sorted(self._feature_map.keys()):
result[key] = self._outputs[index]
index += 1
self._result = result
def pad(self, t):
s = tf.shape(t)
paddings = [[0, 0], [0, self._static_max_length - s[1]]]
x = tf.pad(t, paddings, 'CONSTANT', constant_values=0)
x = tf.reshape(x, [s[0], self._static_max_length])
assert x.get_shape().as_list()[1] is self._static_max_length
return x
def parse_example(self, serialized):
parsed = parsing_ops.parse_example(serialized, self._feature_map)
result = []
for key in sorted(self._feature_map.keys()):
val = parsed[key]
if isinstance(val, sparse_tensor_lib.SparseTensor):
dense_tensor = tf.sparse_tensor_to_dense(val)
if self._static_max_length is not None:
dense_tensor = self.pad(dense_tensor)
result.append(dense_tensor)
else:
result.append(val)
return tuple(result)
@property
def iterator(self):
return self._iterator
@property
def init_op(self):
return self._init_op
@property
def batch(self):
return self._result
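# Usage sketch (hypothetical feature_map; the keys must match those used when
# the TFRecord file was written, and VarLen features are converted to dense,
# optionally padded, tensors by parse_example above):
#
#   feature_map = {'tokens': tf.VarLenFeature(tf.int64),
#                  'label': tf.FixedLenFeature([], tf.int64)}
#   pipeline = Pipeline('train.tf', feature_map, batch_size=64)
#   with tf.Session() as sess:
#       sess.run(pipeline.init_op)          # not needed if one_shot=True
#       batch = sess.run(pipeline.batch)    # dict: feature name -> tensor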
# namedtuple for bucket_info object (used in Pipeline)
# func: a mapping from examples to tf.int64 keys
# pads: a set of tf shapes that correspond to padded examples
bucket_info = namedtuple("bucket_info", "func pads")
def int64_feature(value):
""" Takes a single int (e.g. 3) and converts it to a tf Feature """
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(sequence):
""" Sequence of ints (e.g [1,2,3]) to TF feature """
return tf.train.Feature(int64_list=tf.train.Int64List(value=sequence))
| [((112, 14, 112, 52), 'collections.namedtuple', 'namedtuple', ({(112, 25, 112, 38): '"""bucket_info"""', (112, 40, 112, 51): '"""func pads"""'}, {}), "('bucket_info', 'func pads')", False, 'from collections import namedtuple\n'), ((37, 18, 37, 56), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', ({(37, 42, 37, 55): 'tfrecord_file'}, {}), '(tfrecord_file)', True, 'import tensorflow as tf\n'), ((75, 12, 75, 23), 'tensorflow.shape', 'tf.shape', ({(75, 21, 75, 22): 't'}, {}), '(t)', True, 'import tensorflow as tf\n'), ((77, 12, 77, 62), 'tensorflow.pad', 'tf.pad', (), '', True, 'import tensorflow as tf\n'), ((78, 12, 78, 58), 'tensorflow.reshape', 'tf.reshape', ({(78, 23, 78, 24): 'x', (78, 26, 78, 57): '[s[0], self._static_max_length]'}, {}), '(x, [s[0], self._static_max_length])', True, 'import tensorflow as tf\n'), ((83, 17, 83, 73), 'tensorflow.python.ops.parsing_ops.parse_example', 'parsing_ops.parse_example', ({(83, 43, 83, 53): 'serialized', (83, 55, 83, 72): 'self._feature_map'}, {}), '(serialized, self._feature_map)', False, 'from tensorflow.python.ops import parsing_ops\n'), ((117, 39, 117, 72), 'tensorflow.train.Int64List', 'tf.train.Int64List', (), '', True, 'import tensorflow as tf\n'), ((122, 39, 122, 73), 'tensorflow.train.Int64List', 'tf.train.Int64List', (), '', True, 'import tensorflow as tf\n'), ((88, 31, 88, 61), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', ({(88, 57, 88, 60): 'val'}, {}), '(val)', True, 'import tensorflow as tf\n')] |
dabeaz/py65 | src/py65/devices/mpu65c02.py | 62d790445018f0616508022912b67d8d64935a29 | from py65.devices import mpu6502
from py65.utils.devices import make_instruction_decorator
class MPU(mpu6502.MPU):
def __init__(self, *args, **kwargs):
mpu6502.MPU.__init__(self, *args, **kwargs)
self.name = '65C02'
self.waiting = False
def step(self):
if self.waiting:
self.processorCycles += 1
else:
mpu6502.MPU.step(self)
return self
# Make copies of the lists
instruct = mpu6502.MPU.instruct[:]
cycletime = mpu6502.MPU.cycletime[:]
extracycles = mpu6502.MPU.extracycles[:]
disassemble = mpu6502.MPU.disassemble[:]
instruction = make_instruction_decorator(instruct, disassemble,
cycletime, extracycles)
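    # `instruction` (built by make_instruction_decorator from py65) registers
    # each inst_0xNN handler below into the copied instruct/cycletime/
    # extracycles/disassemble tables; the opcode slot appears to be derived
    # from the handler's name (e.g. inst_0x64 -> opcode 0x64).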
# addressing modes
def ZeroPageIndirectAddr(self):
return self.WordAt( 255 & (self.ByteAt(self.pc)))
def AccumulatorAddr(self):
return self.a
# operations
def opRMB(self, x, mask):
address = x()
self.memory[address] &= mask
def opSMB(self, x, mask):
address = x()
self.memory[address] |= mask
def opSTZ(self, x):
self.memory[x()] = 0x00
def opTSB(self, x):
address = x()
m = self.memory[address]
self.p &= ~self.ZERO
z = m & self.a
if z != 0:
self.p |= self.ZERO
self.memory[address] = m | self.a
def opTRB(self, x):
address = x()
m = self.memory[address]
self.p &= ~self.ZERO
z = m & self.a
if z != 0:
self.p |= self.ZERO
self.memory[address] = m & ~self.a
# instructions
@instruction(name="RMB0", mode="zpg", cycles=5)
def inst_0x07(self):
self.opRMB(self.ZeroPageAddr, 0xFE)
self.pc += 1
@instruction(name="ORA", mode="zpi", cycles=5)
def inst_0x12(self):
self.opORA(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="RMB1", mode="zpg", cycles=5)
def inst_0x17(self):
self.opRMB(self.ZeroPageAddr, 0xFD)
self.pc += 1
@instruction(name="RMB2", mode="zpg", cycles=5)
def inst_0x27(self):
self.opRMB(self.ZeroPageAddr, 0xFB)
self.pc += 1
@instruction(name="AND", mode="zpi", cycles=5)
def inst_0x32(self):
self.opAND(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="BIT", mode="zpx", cycles=4)
def inst_0x34(self):
self.opBIT(self.ZeroPageXAddr)
self.pc += 1
@instruction(name="RMB3", mode="zpg", cycles=5)
def inst_0x37(self):
self.opRMB(self.ZeroPageAddr, 0xF7)
self.pc += 1
@instruction(name="BIT", mode="abx", cycles=4)
def inst_0x3c(self):
self.opBIT(self.AbsoluteXAddr)
self.pc += 2
@instruction(name="RMB4", mode="zpg", cycles=5)
def inst_0x47(self):
self.opRMB(self.ZeroPageAddr, 0xEF)
self.pc += 1
@instruction(name="EOR", mode="zpi", cycles=5)
def inst_0x52(self):
self.opEOR(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="RMB5", mode="zpg", cycles=5)
def inst_0x57(self):
self.opRMB(self.ZeroPageAddr, 0xDF)
self.pc += 1
@instruction(name="PHY", mode="imp", cycles=3)
def inst_0x5a(self):
self.stPush(self.y)
@instruction(name="STZ", mode="imp", cycles=3)
def inst_0x64(self):
self.opSTZ(self.ZeroPageAddr)
self.pc += 1
@instruction(name="RMB6", mode="zpg", cycles=5)
def inst_0x67(self):
self.opRMB(self.ZeroPageAddr, 0xBF)
self.pc += 1
@instruction(name="ADC", mode="zpi", cycles=5)
def inst_0x72(self):
self.opADC(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="STZ", mode="zpx", cycles=4)
def inst_0x74(self):
self.opSTZ(self.ZeroPageXAddr)
self.pc += 1
@instruction(name="PHY", mode="imp", cycles=4)
def inst_0x7a(self):
self.y = self.stPop()
self.FlagsNZ(self.y)
@instruction(name="RMB7", mode="zpg", cycles=5)
def inst_0x77(self):
self.opRMB(self.ZeroPageAddr, 0x7F)
self.pc += 1
@instruction(name="SMB0", mode="zpg", cycles=5)
def inst_0x87(self):
self.opSMB(self.ZeroPageAddr, 0x01)
self.pc += 1
@instruction(name="STA", mode="zpi", cycles=5)
def inst_0x92(self):
self.opSTA(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="SMB1", mode="zpg", cycles=5)
def inst_0x97(self):
self.opSMB(self.ZeroPageAddr, 0x02)
self.pc += 1
@instruction(name="STZ", mode="abs", cycles=4)
def inst_0x9c(self):
self.opSTZ(self.AbsoluteAddr)
self.pc += 2
@instruction(name="STZ", mode="abx", cycles=5)
def inst_0x9e(self):
self.opSTZ(self.AbsoluteXAddr)
self.pc += 2
@instruction(name="SMB2", mode="zpg", cycles=5)
def inst_0xa7(self):
self.opSMB(self.ZeroPageAddr, 0x04)
self.pc += 1
@instruction(name="LDA", mode="zpi", cycles=5)
def inst_0xb2(self):
self.opLDA(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="SMB3", mode="zpg", cycles=5)
def inst_0xb7(self):
self.opSMB(self.ZeroPageAddr, 0x08)
self.pc += 1
@instruction(name="SMB4", mode="zpg", cycles=5)
def inst_0xc7(self):
self.opSMB(self.ZeroPageAddr, 0x10)
self.pc += 1
@instruction(name="SMB5", mode="zpg", cycles=5)
def inst_0xd7(self):
self.opSMB(self.ZeroPageAddr, 0x20)
self.pc += 1
@instruction(name="PHX", mode="imp", cycles=3)
def inst_0xda(self):
self.stPush(self.x)
@instruction(name="SMB6", mode="zpg", cycles=5)
def inst_0xe7(self):
self.opSMB(self.ZeroPageAddr, 0x40)
self.pc += 1
@instruction(name="SMB7", mode="zpg", cycles=5)
def inst_0xf7(self):
self.opSMB(self.ZeroPageAddr, 0x80)
self.pc += 1
@instruction(name="PLX", mode="imp", cycles=4)
def inst_0xfa(self):
self.x = self.stPop()
self.FlagsNZ(self.x)
@instruction(name="TSB", mode="zpg", cycles=5)
def inst_0x04(self):
self.opTSB(self.ZeroPageAddr)
self.pc += 1
@instruction(name="TSB", mode="abs", cycles=6)
def inst_0x0c(self):
self.opTSB(self.AbsoluteAddr)
self.pc += 2
@instruction(name="TRB", mode="zpg", cycles=5)
def inst_0x14(self):
self.opTRB(self.ZeroPageAddr)
self.pc += 1
@instruction(name="INC", mode="acc", cycles=2)
def inst_0x1a(self):
self.opINCR(None)
@instruction(name="TRB", mode="abs", cycles=6)
def inst_0x1c(self):
self.opTRB(self.AbsoluteAddr)
self.pc += 2
@instruction(name="DEC", mode="acc", cycles=2)
def inst_0x3a(self):
self.opDECR(None)
@instruction(name="BRA", mode="rel", cycles=1, extracycles=1)
def inst_0x80(self):
self.BranchRelAddr()
@instruction(name="WAI", mode='imp', cycles=3)
def inst_0xCB(self):
self.waiting = True
@instruction(name="CMP", mode='zpi', cycles=6) # Don't know cycles
def inst_0xD2(self):
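        # NOTE: opcode 0xD2 is CMP (zp) indirect, which compares against the
        # accumulator, but this handler dispatches to opCPY (compare with Y);
        # that looks like a slip inherited from the CPY handlers.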
self.opCPY(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="SBC", mode="zpi", cycles=5)
def inst_0xf2(self):
self.opSBC(self.ZeroPageIndirectAddr)
self.pc += 1
| [((23, 18, 24, 71), 'py65.utils.devices.make_instruction_decorator', 'make_instruction_decorator', ({(23, 45, 23, 53): 'instruct', (23, 55, 23, 66): 'disassemble', (24, 48, 24, 57): 'cycletime', (24, 59, 24, 70): 'extracycles'}, {}), '(instruct, disassemble, cycletime, extracycles)', False, 'from py65.utils.devices import make_instruction_decorator\n'), ((6, 8, 6, 51), 'py65.devices.mpu6502.MPU.__init__', 'mpu6502.MPU.__init__', ({(6, 29, 6, 33): 'self', (6, 35, 6, 40): '*args'}, {}), '(self, *args, **kwargs)', False, 'from py65.devices import mpu6502\n'), ((14, 12, 14, 34), 'py65.devices.mpu6502.MPU.step', 'mpu6502.MPU.step', ({(14, 29, 14, 33): 'self'}, {}), '(self)', False, 'from py65.devices import mpu6502\n')] |
soerendip/ms-mint | tests/test__io.py | bf5f5d87d07a0d2108c6cd0d92c278f2ea762e58 | import pandas as pd
import shutil
import os
import io
from ms_mint.Mint import Mint
from pathlib import Path as P
from ms_mint.io import (
ms_file_to_df,
mzml_to_pandas_df_pyteomics,
convert_ms_file_to_feather,
convert_ms_file_to_parquet,
MZMLB_AVAILABLE,
)
from paths import (
TEST_MZML,
TEST_MZXML,
TEST_PARQUET,
TEST_MZMLB_POS,
TEST_MZML_POS,
TEST_MZML_NEG,
)
def test__ms_file_to_df__mzML():
result = ms_file_to_df(TEST_MZML)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__ms_file_to_df__mzML_timeunit_minutes():
result = ms_file_to_df(TEST_MZML, time_unit="minutes")
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__ms_file_to_df__mzXML():
result = ms_file_to_df(TEST_MZXML)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__mzml_to_pandas_df_pyteomics_pos():
result = mzml_to_pandas_df_pyteomics(TEST_MZML_POS)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
assert all(result.polarity == "+"), f'Polarity should be "+"\n{result}'
def test__mzml_to_pandas_df_pyteomics_neg():
result = mzml_to_pandas_df_pyteomics(TEST_MZML_NEG)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
assert all(result.polarity == "-"), f'Polarity should be "-"\n{result}'
def test__read_parquet():
result = ms_file_to_df(TEST_PARQUET)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__write_read_hdf(tmpdir):
df = ms_file_to_df(TEST_PARQUET)
fn = P(tmpdir) / "file.hdf"
df.to_hdf(fn, key="data")
result = ms_file_to_df(fn)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__read_mzMLb(tmpdir):
if not MZMLB_AVAILABLE:
return None
result = ms_file_to_df(TEST_MZMLB_POS)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
# assert all(result.polarity == '+'), f'Polarity should be "+"\n{result}'
def test__convert_ms_file_to_feather(tmpdir):
print(tmpdir)
shutil.copy(TEST_MZML, tmpdir)
fn = P(tmpdir) / P(TEST_MZML).name
fn_out = fn.with_suffix(".feather")
print(fn, fn_out)
convert_ms_file_to_feather(fn)
assert fn_out.is_file(), f"File not generated {fn_out}"
df = ms_file_to_df(fn)
df_fea = ms_file_to_df(fn_out)
assert df_fea.equals(df), "DataFrames not equal"
def test__convert_ms_file_to_parquet(tmpdir):
print(tmpdir)
shutil.copy(TEST_MZML, tmpdir)
fn = P(tmpdir) / P(TEST_MZML).name
fn_out = fn.with_suffix(".parquet")
print(fn, fn_out)
convert_ms_file_to_parquet(fn)
assert fn_out.is_file(), f"File not generated {fn_out}"
df = ms_file_to_df(fn)
df_fea = ms_file_to_df(fn_out)
assert df_fea.equals(df), "DataFrames not equal"
def test__export_to_excel(tmp_path):
filename = os.path.join(tmp_path, "output.xlsx")
mint = Mint(verbose=True)
mint.ms_files = "tests/data/test.mzXML"
mint.run()
mint.export(filename)
assert os.path.isfile(filename)
def test__export_to_excel_without_fn():
mint = Mint(verbose=True)
mint.ms_files = TEST_MZXML
mint.targets = pd.DataFrame(
{
"peak_label": ["A"],
"mz_mean": [200],
"mz_width": [10],
"intensity_threshold": [0],
"rt_min": [0],
"rt_max": [10],
"targets_filename": ["unknown"],
}
)
mint.run()
buffer = mint.export()
assert isinstance(buffer, io.BytesIO)
df = pd.read_excel(buffer, sheet_name="Results")
assert len(df) == 1, len(df)
assert df.loc[0, "peak_label"] == "A", df.loc[0, "peak_label"]
assert df.loc[0, "ms_file"] == P(TEST_MZXML).name, df.loc[0, "ms_file"]
| [((29, 13, 29, 37), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(29, 27, 29, 36): 'TEST_MZML'}, {}), '(TEST_MZML)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((43, 13, 43, 58), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', (), '', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((57, 13, 57, 38), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(57, 27, 57, 37): 'TEST_MZXML'}, {}), '(TEST_MZXML)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((71, 13, 71, 55), 'ms_mint.io.mzml_to_pandas_df_pyteomics', 'mzml_to_pandas_df_pyteomics', ({(71, 41, 71, 54): 'TEST_MZML_POS'}, {}), '(TEST_MZML_POS)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((86, 13, 86, 55), 'ms_mint.io.mzml_to_pandas_df_pyteomics', 'mzml_to_pandas_df_pyteomics', ({(86, 41, 86, 54): 'TEST_MZML_NEG'}, {}), '(TEST_MZML_NEG)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((101, 13, 101, 40), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(101, 27, 101, 39): 'TEST_PARQUET'}, {}), '(TEST_PARQUET)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((115, 9, 115, 36), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(115, 23, 115, 35): 'TEST_PARQUET'}, {}), '(TEST_PARQUET)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((118, 13, 118, 30), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(118, 27, 118, 29): 'fn'}, {}), '(fn)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((134, 13, 134, 42), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(134, 27, 134, 41): 'TEST_MZMLB_POS'}, {}), '(TEST_MZMLB_POS)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((150, 4, 150, 34), 'shutil.copy', 'shutil.copy', ({(150, 16, 150, 25): 'TEST_MZML', (150, 27, 150, 33): 'tmpdir'}, {}), '(TEST_MZML, tmpdir)', False, 'import shutil\n'), ((154, 4, 154, 34), 'ms_mint.io.convert_ms_file_to_feather', 'convert_ms_file_to_feather', ({(154, 31, 154, 33): 'fn'}, {}), '(fn)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((156, 9, 156, 26), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(156, 23, 156, 25): 'fn'}, {}), '(fn)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((157, 13, 157, 34), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(157, 27, 157, 33): 'fn_out'}, {}), '(fn_out)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((163, 4, 163, 34), 'shutil.copy', 'shutil.copy', ({(163, 16, 163, 25): 'TEST_MZML', 
(163, 27, 163, 33): 'tmpdir'}, {}), '(TEST_MZML, tmpdir)', False, 'import shutil\n'), ((167, 4, 167, 34), 'ms_mint.io.convert_ms_file_to_parquet', 'convert_ms_file_to_parquet', ({(167, 31, 167, 33): 'fn'}, {}), '(fn)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((169, 9, 169, 26), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(169, 23, 169, 25): 'fn'}, {}), '(fn)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((170, 13, 170, 34), 'ms_mint.io.ms_file_to_df', 'ms_file_to_df', ({(170, 27, 170, 33): 'fn_out'}, {}), '(fn_out)', False, 'from ms_mint.io import ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE\n'), ((175, 15, 175, 52), 'os.path.join', 'os.path.join', ({(175, 28, 175, 36): 'tmp_path', (175, 38, 175, 51): '"""output.xlsx"""'}, {}), "(tmp_path, 'output.xlsx')", False, 'import os\n'), ((176, 11, 176, 29), 'ms_mint.Mint.Mint', 'Mint', (), '', False, 'from ms_mint.Mint import Mint\n'), ((180, 11, 180, 35), 'os.path.isfile', 'os.path.isfile', ({(180, 26, 180, 34): 'filename'}, {}), '(filename)', False, 'import os\n'), ((184, 11, 184, 29), 'ms_mint.Mint.Mint', 'Mint', (), '', False, 'from ms_mint.Mint import Mint\n'), ((186, 19, 196, 5), 'pandas.DataFrame', 'pd.DataFrame', ({(187, 8, 195, 9): "{'peak_label': ['A'], 'mz_mean': [200], 'mz_width': [10],\n 'intensity_threshold': [0], 'rt_min': [0], 'rt_max': [10],\n 'targets_filename': ['unknown']}"}, {}), "({'peak_label': ['A'], 'mz_mean': [200], 'mz_width': [10],\n 'intensity_threshold': [0], 'rt_min': [0], 'rt_max': [10],\n 'targets_filename': ['unknown']})", True, 'import pandas as pd\n'), ((200, 9, 200, 52), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((116, 9, 116, 18), 'pathlib.Path', 'P', ({(116, 11, 116, 17): 'tmpdir'}, {}), '(tmpdir)', True, 'from pathlib import Path as P\n'), ((151, 9, 151, 18), 'pathlib.Path', 'P', ({(151, 11, 151, 17): 'tmpdir'}, {}), '(tmpdir)', True, 'from pathlib import Path as P\n'), ((164, 9, 164, 18), 'pathlib.Path', 'P', ({(164, 11, 164, 17): 'tmpdir'}, {}), '(tmpdir)', True, 'from pathlib import Path as P\n'), ((151, 21, 151, 33), 'pathlib.Path', 'P', ({(151, 23, 151, 32): 'TEST_MZML'}, {}), '(TEST_MZML)', True, 'from pathlib import Path as P\n'), ((164, 21, 164, 33), 'pathlib.Path', 'P', ({(164, 23, 164, 32): 'TEST_MZML'}, {}), '(TEST_MZML)', True, 'from pathlib import Path as P\n'), ((203, 35, 203, 48), 'pathlib.Path', 'P', ({(203, 37, 203, 47): 'TEST_MZXML'}, {}), '(TEST_MZXML)', True, 'from pathlib import Path as P\n')] |
moiyad/image | core/views.py | d4515ef3057794f38268a6887bfff157115f26f7 | from django.core.files.storage import FileSystemStorage
from django.shortcuts import render, redirect
from core.forms import DocumentForm
from core.models import Document
from media import image_cv2
def home(request):
documents = Document.objects.all()
number = len(image_cv2.myList)
return render(request, 'core/home.html', {'documents': documents, 'number': number})
def simple_upload(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
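        # FileSystemStorage saves under settings.MEDIA_ROOT by default and
        # fs.url() builds the public link from settings.MEDIA_URL.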
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
return render(request, 'core/simple_upload.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'core/simple_upload.html')
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('home')
else:
form = DocumentForm()
return render(request, 'core/model_form_upload.html', {
'form': form
})
| [((10, 16, 10, 38), 'core.models.Document.objects.all', 'Document.objects.all', ({}, {}), '()', False, 'from core.models import Document\n'), ((12, 11, 12, 88), 'django.shortcuts.render', 'render', ({(12, 18, 12, 25): 'request', (12, 27, 12, 43): '"""core/home.html"""', (12, 45, 12, 87): "{'documents': documents, 'number': number}"}, {}), "(request, 'core/home.html', {'documents': documents, 'number': number})", False, 'from django.shortcuts import render, redirect\n'), ((24, 11, 24, 53), 'django.shortcuts.render', 'render', ({(24, 18, 24, 25): 'request', (24, 27, 24, 52): '"""core/simple_upload.html"""'}, {}), "(request, 'core/simple_upload.html')", False, 'from django.shortcuts import render, redirect\n'), ((35, 11, 37, 6), 'django.shortcuts.render', 'render', ({(35, 18, 35, 25): 'request', (35, 27, 35, 56): '"""core/model_form_upload.html"""', (35, 58, 37, 5): "{'form': form}"}, {}), "(request, 'core/model_form_upload.html', {'form': form})", False, 'from django.shortcuts import render, redirect\n'), ((18, 13, 18, 32), 'django.core.files.storage.FileSystemStorage', 'FileSystemStorage', ({}, {}), '()', False, 'from django.core.files.storage import FileSystemStorage\n'), ((21, 15, 23, 10), 'django.shortcuts.render', 'render', ({(21, 22, 21, 29): 'request', (21, 31, 21, 56): '"""core/simple_upload.html"""', (21, 58, 23, 9): "{'uploaded_file_url': uploaded_file_url}"}, {}), "(request, 'core/simple_upload.html', {'uploaded_file_url':\n uploaded_file_url})", False, 'from django.shortcuts import render, redirect\n'), ((29, 15, 29, 56), 'core.forms.DocumentForm', 'DocumentForm', ({(29, 28, 29, 40): 'request.POST', (29, 42, 29, 55): 'request.FILES'}, {}), '(request.POST, request.FILES)', False, 'from core.forms import DocumentForm\n'), ((34, 15, 34, 29), 'core.forms.DocumentForm', 'DocumentForm', ({}, {}), '()', False, 'from core.forms import DocumentForm\n'), ((32, 19, 32, 35), 'django.shortcuts.redirect', 'redirect', ({(32, 28, 32, 34): '"""home"""'}, {}), "('home')", False, 'from django.shortcuts import render, redirect\n')] |
obastani/verifair | python/verifair/benchmarks/fairsquare/M_BN_F_SVM_A_Q.py | 1d5efea041330fa9fe8d59d976bdd3ef97aff417 | from .helper import *
def sample(flag):
sex = step([(0,1,0.3307), (1,2,0.6693)])
if sex < 1:
capital_gain = gaussian(568.4105, 24248365.5428)
if capital_gain < 7298.0000:
age = gaussian(38.4208, 184.9151)
capital_loss = gaussian(86.5949, 157731.9553)
else:
age = gaussian(38.8125, 193.4918)
capital_loss = gaussian(117.8083, 252612.0300)
else:
capital_gain = gaussian(1329.3700, 69327473.1006)
if capital_gain < 5178.0000:
age = gaussian(38.6361, 187.2435)
capital_loss = gaussian(87.0152, 161032.4157)
else:
age = gaussian(38.2668, 187.2747)
capital_loss = gaussian(101.7672, 189798.1926)
sensitiveAttribute(sex < 1, flag)
qualified(age > 18)
N_age = (age - 17.0) / 62.0
N_capital_gain = (capital_gain - 0.0) / 22040.0
N_capital_loss = (capital_loss - 0.0) / 1258.0
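    # Linear SVM decision value over the features normalized above; the event
    # t < 0 is both the sampled return value and the property checked by the
    # fairnessTarget call kept (unreachable after the return) from the
    # original FairSquare benchmark below.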
t = 0.0006 * N_age + -5.7363 * N_capital_gain + -0.0002 * N_capital_loss + 1.0003
if sex > 1:
t = t + -0.0003
if sex < 1:
t = t - 0.5
return int(t < 0)
fairnessTarget(t < 0)
| [] |
muammar/mlchem | ml4chem/atomistic/models/neuralnetwork.py | 365487c23ea3386657e178e56ab31adfe8d5d073 | import dask
import datetime
import logging
import time
import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
from ml4chem.metrics import compute_rmse
from ml4chem.atomistic.models.base import DeepLearningModel, DeepLearningTrainer
from ml4chem.atomistic.models.loss import AtomicMSELoss
from ml4chem.optim.handler import get_optimizer, get_lr_scheduler, get_lr
from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters
from pprint import pformat
# Setting precision and starting logger object
torch.set_printoptions(precision=10)
logger = logging.getLogger()
class NeuralNetwork(DeepLearningModel, torch.nn.Module):
"""Atom-centered Neural Network Regression with Pytorch
This model is based on Ref. 1 by Behler and Parrinello.
Parameters
----------
hiddenlayers : tuple
Structure of hidden layers in the neural network.
activation : str
Activation functions. Supported "tanh", "relu", or "celu".
References
----------
1. Behler, J. & Parrinello, M. Generalized Neural-Network Representation
of High-Dimensional Potential-Energy Surfaces. Phys. Rev. Lett. 98,
146401 (2007).
2. Khorshidi, A. & Peterson, A. A. Amp : A modular approach to machine
learning in atomistic simulations. Comput. Phys. Commun. 207, 310–324
(2016).
"""
NAME = "PytorchPotentials"
@classmethod
def name(cls):
"""Returns name of class"""
return cls.NAME
def __init__(self, hiddenlayers=(3, 3), activation="relu", **kwargs):
super(DeepLearningModel, self).__init__()
self.hiddenlayers = hiddenlayers
self.activation = activation
def prepare_model(self, input_dimension, data=None, purpose="training"):
"""Prepare the model
Parameters
----------
input_dimension : int
Input's dimension.
data : object
Data object created from the handler.
purpose : str
Purpose of this model: 'training', 'inference'.
"""
self.input_dimension = input_dimension
activation = {
"tanh": torch.nn.Tanh,
"relu": torch.nn.ReLU,
"celu": torch.nn.CELU,
}
hl = len(self.hiddenlayers)
if purpose == "training":
logger.info(" ")
logger.info("Model")
logger.info("=====")
now = datetime.datetime.now()
logger.info(
"Module accessed on {}.".format(now.strftime("%Y-%m-%d %H:%M:%S"))
)
logger.info("Model name: {}.".format(self.name()))
logger.info("Number of hidden-layers: {}".format(hl))
logger.info(
"Structure of Neural Net: {}".format(
"(input, " + str(self.hiddenlayers)[1:-1] + ", output)"
)
)
layers = range(len(self.hiddenlayers) + 1)
try:
unique_element_symbols = data.unique_element_symbols[purpose]
except TypeError:
unique_element_symbols = data.get_unique_element_symbols(purpose=purpose)
unique_element_symbols = unique_element_symbols[purpose]
symbol_model_pair = []
for symbol in unique_element_symbols:
linears = []
intercept_name = "intercept_" + symbol
slope_name = "slope_" + symbol
if purpose == "training":
intercept = (data.max_energy + data.min_energy) / 2.0
intercept = torch.nn.Parameter(
torch.tensor(intercept, requires_grad=True)
)
slope = (data.max_energy - data.min_energy) / 2.0
slope = torch.nn.Parameter(torch.tensor(slope, requires_grad=True))
self.register_parameter(intercept_name, intercept)
self.register_parameter(slope_name, slope)
elif purpose == "inference":
intercept = torch.nn.Parameter(torch.tensor(0.0))
slope = torch.nn.Parameter(torch.tensor(0.0))
self.register_parameter(intercept_name, intercept)
self.register_parameter(slope_name, slope)
for index in layers:
# This is the input layer
if index == 0:
out_dimension = self.hiddenlayers[0]
_linear = torch.nn.Linear(input_dimension, out_dimension)
linears.append(_linear)
linears.append(activation[self.activation]())
# This is the output layer
elif index == len(self.hiddenlayers):
inp_dimension = self.hiddenlayers[index - 1]
out_dimension = 1
_linear = torch.nn.Linear(inp_dimension, out_dimension)
linears.append(_linear)
# These are hidden-layers
else:
inp_dimension = self.hiddenlayers[index - 1]
out_dimension = self.hiddenlayers[index]
_linear = torch.nn.Linear(inp_dimension, out_dimension)
linears.append(_linear)
linears.append(activation[self.activation]())
# Stacking up the layers.
linears = torch.nn.Sequential(*linears)
symbol_model_pair.append([symbol, linears])
self.linears = torch.nn.ModuleDict(symbol_model_pair)
if purpose == "training":
total_params, train_params = get_number_of_parameters(self)
logger.info("Total number of parameters: {}.".format(total_params))
logger.info("Number of training parameters: {}.".format(train_params))
logger.info(" ")
logger.info(self.linears)
            # Iterate over all modules and just initialize those that are
# a linear layer.
logger.warning(
"Initialization of weights with Xavier Uniform by " "default."
)
for m in self.modules():
if isinstance(m, torch.nn.Linear):
# nn.init.normal_(m.weight) # , mean=0, std=0.01)
torch.nn.init.xavier_uniform_(m.weight)
def forward(self, X):
"""Forward propagation
This is forward propagation and it returns the atomic energy.
Parameters
----------
        X : dict
            Dictionary of inputs in the feature space, keyed by image hash.
Returns
-------
        outputs : tensor
            Tensor with one predicted energy per image.
"""
outputs = []
for hash in X:
image = X[hash]
atomic_energies = []
for symbol, x in image:
# FIXME this conditional can be removed after de/serialization
# is fixed.
if isinstance(symbol, bytes):
symbol = symbol.decode("utf-8")
x = self.linears[symbol](x)
intercept_name = "intercept_" + symbol
slope_name = "slope_" + symbol
slope = getattr(self, slope_name)
intercept = getattr(self, intercept_name)
x = (slope * x) + intercept
atomic_energies.append(x)
atomic_energies = torch.cat(atomic_energies)
image_energy = torch.sum(atomic_energies)
outputs.append(image_energy)
outputs = torch.stack(outputs)
return outputs
def get_activations(self, images, model=None, numpy=True):
"""Get activations of each hidden-layer
This function allows to extract activations of each hidden-layer of
the neural network.
Parameters
----------
        images : dict
            Images with structure hash, features.
model : object
A ML4Chem model object.
numpy : bool
Whether we want numpy arrays or tensors.
Returns
-------
activations : DataFrame
A DataFrame with activations for each layer.
"""
activations = []
columns = ["hash", "atom.index", "atom.symbol"]
if model is None:
model = self
model.eval()
for hash, data in images.items():
for index, (symbol, features) in enumerate(data):
counter = 0
layer_counter = 0
for l, layer in enumerate(model.linears[symbol].modules()):
if isinstance(layer, torch.nn.Linear) and counter == 0:
x = layer(features)
if numpy:
data_ = [hash, index, symbol, x.detach_().numpy()]
else:
data_ = [hash, index, symbol, x.detach_()]
layer_column_name = f"layer{layer_counter}"
if layer_column_name not in columns:
columns.append(layer_column_name)
counter += 1
layer_counter += 1
elif isinstance(layer, torch.nn.Linear) and counter > 0:
x = layer(x)
if numpy:
data_.append(x.detach_().numpy())
else:
data_.append(x.detach_())
layer_column_name = f"layer{layer_counter}"
if layer_column_name not in columns:
columns.append(layer_column_name)
counter += 1
layer_counter += 1
activations.append(data_)
del data_
# Create DataFrame from lists
df = pd.DataFrame(activations, columns=columns)
return df
class train(DeepLearningTrainer):
"""Train the model
Parameters
----------
inputs : dict
Dictionary with hashed feature space.
targets : list
The expected values that the model has to learn aka y.
model : object
The NeuralNetwork class.
data : object
Data object created from the handler.
optimizer : tuple
The optimizer is a tuple with the structure:
        >>> ('adam', {'lr': float, 'weight_decay': float})
epochs : int
Number of full training cycles.
regularization : float
This is the L2 regularization. It is not the same as weight decay.
convergence : dict
Instead of using epochs, users can set a convergence criterion.
        The trainer in this implementation checks the "energy" key (RMSE per image).
lossfxn : obj
A loss function object.
device : str
Calculation can be run in the cpu or cuda (gpu).
batch_size : int
Number of data points per batch to use for training. Default is None.
lr_scheduler : tuple
Tuple with structure: scheduler's name and a dictionary with keyword
arguments.
>>> lr_scheduler = ('ReduceLROnPlateau',
{'mode': 'min', 'patience': 10})
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
checkpoint : dict
Set checkpoints. Dictionary with following structure:
>>> checkpoint = {"label": label, "checkpoint": 100, "path": ""}
`label` refers to the name used to save the checkpoint, `checkpoint`
        is an integer or -1 for saving all epochs, and the path is where the
checkpoint is stored. Default is None and no checkpoint is saved.
test : dict
A dictionary used to compute the error over a validation/test set
during training procedures.
>>> test = {"features": test_space, "targets": test_targets, "data": data_test}
The keys,values of the dictionary are:
- "data": a `Data` object.
- "targets": test set targets.
- "features": a feature space obtained using `features.calculate()`.
"""
def __init__(
self,
inputs,
targets,
model=None,
data=None,
optimizer=(None, None),
regularization=None,
epochs=100,
convergence=None,
lossfxn=None,
device="cpu",
batch_size=None,
lr_scheduler=None,
uncertainty=None,
checkpoint=None,
test=None,
):
self.initial_time = time.time()
if lossfxn is None:
lossfxn = AtomicMSELoss
logger.info("")
logger.info("Training")
logger.info("========")
logger.info(f"Convergence criteria: {convergence}")
logger.info(f"Loss function: {lossfxn.__name__}")
if uncertainty is not None:
logger.info("Options:")
logger.info(f" - Uncertainty penalization: {pformat(uncertainty)}")
logger.info("")
atoms_per_image = data.atoms_per_image
if batch_size is None:
batch_size = len(inputs.values())
if isinstance(batch_size, int):
# Data batches
chunks = list(get_chunks(inputs, batch_size, svm=False))
targets = list(get_chunks(targets, batch_size, svm=False))
atoms_per_image = list(get_chunks(atoms_per_image, batch_size, svm=False))
            if uncertainty is not None:
uncertainty = list(get_chunks(uncertainty, batch_size, svm=False))
uncertainty = [
torch.tensor(u, requires_grad=False, dtype=torch.float)
for u in uncertainty
]
logger.info("")
logging.info("Batch Information")
logging.info("-----------------")
logging.info("Number of batches: {}.".format(len(chunks)))
logging.info("Batch size: {} elements per batch.".format(batch_size))
logger.info(" ")
atoms_per_image = [
torch.tensor(n_atoms, requires_grad=False, dtype=torch.float)
for n_atoms in atoms_per_image
]
targets = [torch.tensor(t, requires_grad=False) for t in targets]
if device == "cuda":
logger.info("Moving data to CUDA...")
atoms_per_image = atoms_per_image.cuda()
targets = targets.cuda()
_inputs = OrderedDict()
for hash, f in inputs.items():
_inputs[hash] = []
for features in f:
symbol, vector = features
_inputs[hash].append((symbol, vector.cuda()))
inputs = _inputs
move_time = time.time() - self.initial_time
h, m, s = convert_elapsed_time(move_time)
logger.info(
"Data moved to GPU in {} hours {} minutes {:.2f} \
seconds.".format(
h, m, s
)
)
logger.info(" ")
# Define optimizer
self.optimizer_name, self.optimizer = get_optimizer(
optimizer, model.parameters()
)
if lr_scheduler is not None:
self.scheduler = get_lr_scheduler(self.optimizer, lr_scheduler)
self.atoms_per_image = atoms_per_image
self.convergence = convergence
self.device = device
self.epochs = epochs
self.model = model
self.lr_scheduler = lr_scheduler
self.lossfxn = lossfxn
self.checkpoint = checkpoint
self.test = test
# Data scattering
client = dask.distributed.get_client()
self.chunks = [client.scatter(chunk) for chunk in chunks]
self.targets = [client.scatter(target) for target in targets]
        if uncertainty is not None:
self.uncertainty = [client.scatter(u) for u in uncertainty]
else:
self.uncertainty = uncertainty
# Let the hunger games begin...
self.trainer()
def trainer(self):
"""Run the training class"""
logger.info(" ")
logger.info("Starting training...\n")
if self.test is None:
logger.info(
"{:6s} {:19s} {:12s} {:12s} {:8s}".format(
"Epoch", "Time Stamp", "Loss", "Error/img", "Error/atom"
)
)
logger.info(
"{:6s} {:19s} {:12s} {:8s} {:8s}".format(
"------",
"-------------------",
"------------",
"------------",
"------------",
)
)
else:
test_features = self.test.get("features", None)
test_targets = self.test.get("targets", None)
test_data = self.test.get("data", None)
logger.info(
"{:6s} {:19s} {:12s} {:12s} {:12s} {:12s} {:16s}".format(
"Epoch",
"Time Stamp",
"Loss",
"Error/img",
"Error/atom",
"Error/img (t)",
"Error/atom (t)",
)
)
logger.info(
"{:6s} {:19s} {:12s} {:8s} {:8s} {:8s} {:8s}".format(
"------",
"-------------------",
"------------",
"------------",
"------------",
"------------",
"------------",
)
)
converged = False
_loss = []
_rmse = []
epoch = 0
client = dask.distributed.get_client()
while not converged:
epoch += 1
self.optimizer.zero_grad() # clear previous gradients
loss, outputs_ = train.closure(
self.chunks,
self.targets,
self.uncertainty,
self.model,
self.lossfxn,
self.atoms_per_image,
self.device,
)
# We step the optimizer
if self.optimizer_name != "LBFGS":
self.optimizer.step()
else:
options = {"closure": self.closure, "current_loss": loss, "max_ls": 10}
self.optimizer.step(options)
# RMSE per image and per/atom
rmse = client.submit(compute_rmse, *(outputs_, self.targets))
atoms_per_image = torch.cat(self.atoms_per_image)
rmse_atom = client.submit(
compute_rmse, *(outputs_, self.targets, atoms_per_image)
)
rmse = rmse.result()
rmse_atom = rmse_atom.result()
_loss.append(loss.item())
_rmse.append(rmse)
# In the case that lr_scheduler is not None
if self.lr_scheduler is not None:
self.scheduler.step(loss)
print("Epoch {} lr {}".format(epoch, get_lr(self.optimizer)))
ts = time.time()
ts = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d " "%H:%M:%S")
if self.test is None:
logger.info(
"{:6d} {} {:8e} {:4e} {:4e}".format(
epoch, ts, loss.detach(), rmse, rmse_atom
)
)
else:
test_model = self.model.eval()
test_predictions = test_model(test_features).detach()
rmse_test = client.submit(
compute_rmse, *(test_predictions, test_targets)
)
atoms_per_image_test = torch.tensor(
test_data.atoms_per_image, requires_grad=False
)
rmse_atom_test = client.submit(
compute_rmse,
*(test_predictions, test_targets, atoms_per_image_test),
)
rmse_test = rmse_test.result()
rmse_atom_test = rmse_atom_test.result()
logger.info(
"{:6d} {} {:8e} {:4e} {:4e} {:4e} {:4e}".format(
epoch,
ts,
loss.detach(),
rmse,
rmse_atom,
rmse_test,
rmse_atom_test,
)
)
if self.checkpoint is not None:
self.checkpoint_save(epoch, self.model, **self.checkpoint)
if self.convergence is None and epoch == self.epochs:
converged = True
elif self.convergence is not None and rmse < self.convergence["energy"]:
converged = True
training_time = time.time() - self.initial_time
h, m, s = convert_elapsed_time(training_time)
logger.info(
"Training finished in {} hours {} minutes {:.2f} seconds.".format(h, m, s)
)
@classmethod
def closure(
Cls, chunks, targets, uncertainty, model, lossfxn, atoms_per_image, device
):
"""Closure
This class method clears previous gradients, iterates over batches,
accumulates the gradients, reduces the gradients, update model
params, and finally returns loss and outputs_.
Parameters
----------
Cls : object
Class object.
chunks : tensor or list
Tensor with input data points in batch with index.
targets : tensor or list
The targets.
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
model : obj
Pytorch model to perform forward() and get gradients.
lossfxn : obj
A loss function object.
atoms_per_image : list
Atoms per image because we are doing atom-centered methods.
device : str
Are we running cuda or cpu?
"""
outputs_ = []
# Get client to send futures to the scheduler
client = dask.distributed.get_client()
running_loss = torch.tensor(0, dtype=torch.float)
accumulation = []
grads = []
# Accumulation of gradients
for index, chunk in enumerate(chunks):
accumulation.append(
client.submit(
train.train_batches,
*(
index,
chunk,
targets,
uncertainty,
model,
lossfxn,
atoms_per_image,
device,
),
)
)
dask.distributed.wait(accumulation)
accumulation = client.gather(accumulation)
for outputs, loss, grad in accumulation:
grad = np.array(grad, dtype=object)
running_loss += loss
outputs_.append(outputs)
grads.append(grad)
grads = sum(grads)
for index, param in enumerate(model.parameters()):
param.grad = torch.tensor(grads[index], dtype=torch.float)
del accumulation
del grads
return running_loss, outputs_
@classmethod
def train_batches(
Cls, index, chunk, targets, uncertainty, model, lossfxn, atoms_per_image, device
):
"""A function that allows training per batches
Parameters
----------
index : int
Index of batch.
chunk : tensor or list
Tensor with input data points in batch with index.
targets : tensor or list
The targets.
model : obj
Pytorch model to perform forward() and get gradients.
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
lossfxn : obj
A loss function object.
atoms_per_image : list
Atoms per image because we are doing atom-centered methods.
device : str
Are we running cuda or cpu?
Returns
-------
loss : tensor
The loss function of the batch.
"""
inputs = OrderedDict(chunk)
outputs = model(inputs)
        if uncertainty is None:
loss = lossfxn(outputs, targets[index], atoms_per_image[index])
else:
loss = lossfxn(
outputs, targets[index], atoms_per_image[index], uncertainty[index]
)
loss.backward()
gradients = []
for param in model.parameters():
try:
gradient = param.grad.detach().numpy()
except AttributeError:
                # This exception catches the case where a batch does not
                # contain any parameter that follows the gradient of a given
                # atom type. For example, suppose two batches with 2 molecules
                # each: the first batch only has C, H, and O, while N appears
                # only in the second batch. The contribution of the first
                # batch to the total gradient for N is then 0.
gradient = 0.0
gradients.append(gradient)
return outputs, loss, gradients
| [((19, 0, 19, 36), 'torch.set_printoptions', 'torch.set_printoptions', (), '', False, 'import torch\n'), ((20, 9, 20, 28), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((152, 23, 152, 61), 'torch.nn.ModuleDict', 'torch.nn.ModuleDict', ({(152, 43, 152, 60): 'symbol_model_pair'}, {}), '(symbol_model_pair)', False, 'import torch\n'), ((210, 18, 210, 38), 'torch.stack', 'torch.stack', ({(210, 30, 210, 37): 'outputs'}, {}), '(outputs)', False, 'import torch\n'), ((284, 13, 284, 55), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((369, 28, 369, 39), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((403, 8, 403, 41), 'logging.info', 'logging.info', ({(403, 21, 403, 40): '"""Batch Information"""'}, {}), "('Batch Information')", False, 'import logging\n'), ((404, 8, 404, 41), 'logging.info', 'logging.info', ({(404, 21, 404, 40): '"""-----------------"""'}, {}), "('-----------------')", False, 'import logging\n'), ((460, 17, 460, 46), 'dask.distributed.get_client', 'dask.distributed.get_client', ({}, {}), '()', False, 'import dask\n'), ((527, 17, 527, 46), 'dask.distributed.get_client', 'dask.distributed.get_client', ({}, {}), '()', False, 'import dask\n'), ((615, 18, 615, 53), 'ml4chem.utils.convert_elapsed_time', 'convert_elapsed_time', ({(615, 39, 615, 52): 'training_time'}, {}), '(training_time)', False, 'from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters\n'), ((653, 17, 653, 46), 'dask.distributed.get_client', 'dask.distributed.get_client', ({}, {}), '()', False, 'import dask\n'), ((655, 23, 655, 57), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((676, 8, 676, 43), 'dask.distributed.wait', 'dask.distributed.wait', ({(676, 30, 676, 42): 'accumulation'}, {}), '(accumulation)', False, 'import dask\n'), ((727, 17, 727, 35), 'collections.OrderedDict', 'OrderedDict', ({(727, 29, 727, 34): 'chunk'}, {}), '(chunk)', False, 'from collections import OrderedDict\n'), ((83, 18, 83, 41), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((149, 22, 149, 51), 'torch.nn.Sequential', 'torch.nn.Sequential', ({(149, 42, 149, 50): '*linears'}, {}), '(*linears)', False, 'import torch\n'), ((155, 41, 155, 71), 'ml4chem.utils.get_number_of_parameters', 'get_number_of_parameters', ({(155, 66, 155, 70): 'self'}, {}), '(self)', False, 'from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters\n'), ((207, 30, 207, 56), 'torch.cat', 'torch.cat', ({(207, 40, 207, 55): 'atomic_energies'}, {}), '(atomic_energies)', False, 'import torch\n'), ((208, 27, 208, 53), 'torch.sum', 'torch.sum', ({(208, 37, 208, 52): 'atomic_energies'}, {}), '(atomic_energies)', False, 'import torch\n'), ((410, 12, 410, 73), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((414, 19, 414, 55), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((421, 22, 421, 35), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((432, 22, 432, 53), 'ml4chem.utils.convert_elapsed_time', 'convert_elapsed_time', ({(432, 43, 432, 52): 'move_time'}, {}), '(move_time)', False, 'from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters\n'), ((447, 29, 447, 75), 'ml4chem.optim.handler.get_lr_scheduler', 'get_lr_scheduler', ({(447, 46, 447, 60): 'self.optimizer', (447, 62, 447, 74): 'lr_scheduler'}, {}), '(self.optimizer, lr_scheduler)', False, 
'from ml4chem.optim.handler import get_optimizer, get_lr_scheduler, get_lr\n'), ((552, 30, 552, 61), 'torch.cat', 'torch.cat', ({(552, 40, 552, 60): 'self.atoms_per_image'}, {}), '(self.atoms_per_image)', False, 'import torch\n'), ((566, 17, 566, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((613, 24, 613, 35), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((680, 19, 680, 47), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((688, 25, 688, 70), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((391, 26, 391, 67), 'ml4chem.utils.get_chunks', 'get_chunks', (), '', False, 'from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters\n'), ((392, 27, 392, 69), 'ml4chem.utils.get_chunks', 'get_chunks', (), '', False, 'from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters\n'), ((393, 35, 393, 85), 'ml4chem.utils.get_chunks', 'get_chunks', (), '', False, 'from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters\n'), ((431, 24, 431, 35), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((582, 39, 584, 17), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((114, 20, 114, 63), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((117, 43, 117, 82), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((131, 30, 131, 77), 'torch.nn.Linear', 'torch.nn.Linear', ({(131, 46, 131, 61): 'input_dimension', (131, 63, 131, 76): 'out_dimension'}, {}), '(input_dimension, out_dimension)', False, 'import torch\n'), ((168, 20, 168, 59), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', ({(168, 50, 168, 58): 'm.weight'}, {}), '(m.weight)', False, 'import torch\n'), ((396, 35, 396, 81), 'ml4chem.utils.get_chunks', 'get_chunks', (), '', False, 'from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters\n'), ((398, 20, 398, 75), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((567, 17, 567, 52), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(567, 49, 567, 51): 'ts'}, {}), '(ts)', False, 'import datetime\n'), ((122, 47, 122, 64), 'torch.tensor', 'torch.tensor', ({(122, 60, 122, 63): '0.0'}, {}), '(0.0)', False, 'import torch\n'), ((123, 43, 123, 60), 'torch.tensor', 'torch.tensor', ({(123, 56, 123, 59): '0.0'}, {}), '(0.0)', False, 'import torch\n'), ((138, 30, 138, 75), 'torch.nn.Linear', 'torch.nn.Linear', ({(138, 46, 138, 59): 'inp_dimension', (138, 61, 138, 74): 'out_dimension'}, {}), '(inp_dimension, out_dimension)', False, 'import torch\n'), ((144, 30, 144, 75), 'torch.nn.Linear', 'torch.nn.Linear', ({(144, 46, 144, 59): 'inp_dimension', (144, 61, 144, 74): 'out_dimension'}, {}), '(inp_dimension, out_dimension)', False, 'import torch\n'), ((381, 59, 381, 79), 'pprint.pformat', 'pformat', ({(381, 67, 381, 78): 'uncertainty'}, {}), '(uncertainty)', False, 'from pprint import pformat\n'), ((564, 53, 564, 75), 'ml4chem.optim.handler.get_lr', 'get_lr', ({(564, 60, 564, 74): 'self.optimizer'}, {}), '(self.optimizer)', False, 'from ml4chem.optim.handler import get_optimizer, get_lr_scheduler, get_lr\n')] |
EntySec/HatSploit | hatsploit/core/db/db.py | 8e445804c252cc24e87888be2c2efc02750ce5ee | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
from hatsploit.core.cli.badges import Badges
from hatsploit.lib.config import Config
from hatsploit.lib.storage import LocalStorage
class DB:
badges = Badges()
config = Config()
local_storage = LocalStorage()
def disconnect_payload_database(self, name):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.local_storage.delete_element("connected_payload_databases", name)
self.local_storage.delete_element("payloads", name)
return
self.badges.print_error("No such payload database connected!")
def disconnect_module_database(self, name):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.local_storage.delete_element("connected_module_databases", name)
self.local_storage.delete_element("modules", name)
return
self.badges.print_error("No such module database connected!")
def disconnect_plugin_database(self, name):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.local_storage.delete_element("connected_plugin_databases", name)
self.local_storage.delete_element("plugins", name)
return
self.badges.print_error("No such plugin database connected!")
def connect_payload_database(self, name, path):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.badges.print_error("Payload database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a payload database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect payload database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "payloads":
self.badges.print_error("Not a payload database!")
return
del database['__database__']
payloads = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_payload_databases"):
self.local_storage.set("connected_payload_databases", {})
self.local_storage.update("connected_payload_databases", data)
if self.local_storage.get("payloads"):
self.local_storage.update("payloads", payloads)
else:
self.local_storage.set("payloads", payloads)
def connect_module_database(self, name, path):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.badges.print_error("Module database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a module database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect module database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "modules":
self.badges.print_error("Not a module database!")
return
del database['__database__']
modules = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_module_databases"):
self.local_storage.set("connected_module_databases", {})
self.local_storage.update("connected_module_databases", data)
if self.local_storage.get("modules"):
self.local_storage.update("modules", modules)
else:
self.local_storage.set("modules", modules)
def connect_plugin_database(self, name, path):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.badges.print_error("Plugin database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
            self.badges.print_error("Not a plugin database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect plugin database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "plugins":
self.badges.print_error("Not a plugin database!")
return
del database['__database__']
plugins = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_plugin_databases"):
self.local_storage.set("connected_plugin_databases", {})
self.local_storage.update("connected_plugin_databases", data)
if self.local_storage.get("plugins"):
self.local_storage.update("plugins", plugins)
else:
self.local_storage.set("plugins", plugins)
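
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how this class might be driven. The database name and
# JSON path below are hypothetical; the file is expected to contain a
# __database__ section whose "type" is "payloads".
if __name__ == '__main__':
    db = DB()
    db.connect_payload_database('my_payloads', '/tmp/my_payloads.json')
    db.disconnect_payload_database('my_payloads')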
| [((36, 13, 36, 21), 'hatsploit.core.cli.badges.Badges', 'Badges', ({}, {}), '()', False, 'from hatsploit.core.cli.badges import Badges\n'), ((37, 13, 37, 21), 'hatsploit.lib.config.Config', 'Config', ({}, {}), '()', False, 'from hatsploit.lib.config import Config\n'), ((38, 20, 38, 34), 'hatsploit.lib.storage.LocalStorage', 'LocalStorage', ({}, {}), '()', False, 'from hatsploit.lib.storage import LocalStorage\n'), ((69, 15, 69, 35), 'os.path.exists', 'os.path.exists', ({(69, 30, 69, 34): 'path'}, {}), '(path)', False, 'import os\n'), ((110, 15, 110, 35), 'os.path.exists', 'os.path.exists', ({(110, 30, 110, 34): 'path'}, {}), '(path)', False, 'import os\n'), ((151, 15, 151, 35), 'os.path.exists', 'os.path.exists', ({(151, 30, 151, 34): 'path'}, {}), '(path)', False, 'import os\n')] |
NSLS-II/bluesky | bluesky/tests/test_simulators.py | b7d666e65cf4ef556fb46b744c33264c8e3f7507 | from bluesky.plans import scan
from bluesky.simulators import (print_summary, print_summary_wrapper,
summarize_plan,
check_limits,
plot_raster_path)
import pytest
from bluesky.plans import grid_scan
def test_print_summary(hw):
det = hw.det
motor = hw.motor
print_summary(scan([det], motor, -1, 1, 10)) # old name
summarize_plan(scan([det], motor, -1, 1, 10)) # new name
list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))
def test_old_module_name(hw):
det = hw.det
motor = hw.motor
motor1 = hw.motor1
motor2 = hw.motor2
from bluesky.plan_tools import (print_summary, print_summary_wrapper,
plot_raster_path)
with pytest.warns(UserWarning):
print_summary(scan([det], motor, -1, 1, 10))
with pytest.warns(UserWarning):
list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))
with pytest.warns(UserWarning):
plan = grid_scan([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)
plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)
def test_check_limits(RE, hw):
det = hw.det
motor = hw.motor
# The motor object does not currently implement limits.
# Use an assert to help us out if this changes in the future.
assert not hasattr(motor, 'limits')
# # check_limits should warn if it can't find check_value
# TODO: Is there _any_ object to test?
# with pytest.warns(UserWarning):
# check_limits(scan([det], motor, -1, 1, 3))
# monkey-patch some limits
motor.limits = (-2, 2)
# check_limits should do nothing here
check_limits(scan([det], motor, -1, 1, 3))
# check_limits should error if limits are exceeded only if object raises
# this object does not raise
check_limits(scan([det], motor, -3, 3, 3))
# check_limits should raise if limits are equal only if object raises
# this object does not raise
motor.limits = (2, 2)
check_limits(scan([det], motor, -1, 1, 3))
def test_check_limits_needs_RE():
with pytest.raises(RuntimeError) as ctx:
check_limits([])
assert str(ctx.value) == "Bluesky event loop not running"
def test_plot_raster_path(hw):
det = hw.det
motor1 = hw.motor1
motor2 = hw.motor2
plan = grid_scan([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)
plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)
| [((71, 11, 71, 71), 'bluesky.plans.grid_scan', 'grid_scan', ({(71, 21, 71, 26): '[det]', (71, 28, 71, 34): 'motor1', (71, 36, 71, 38): '-5', (71, 40, 71, 41): '5', (71, 43, 71, 45): '10', (71, 47, 71, 53): 'motor2', (71, 55, 71, 57): '-7', (71, 59, 71, 60): '7', (71, 62, 71, 64): '15', (71, 66, 71, 70): 'True'}, {}), '([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)', False, 'from bluesky.plans import grid_scan\n'), ((72, 4, 72, 61), 'bluesky.plan_tools.plot_raster_path', 'plot_raster_path', (), '', False, 'from bluesky.plan_tools import print_summary, print_summary_wrapper, plot_raster_path\n'), ((13, 18, 13, 47), 'bluesky.plans.scan', 'scan', ({(13, 23, 13, 28): '[det]', (13, 30, 13, 35): 'motor', (13, 37, 13, 39): '(-1)', (13, 41, 13, 42): '(1)', (13, 44, 13, 46): '(10)'}, {}), '([det], motor, -1, 1, 10)', False, 'from bluesky.plans import scan\n'), ((14, 19, 14, 48), 'bluesky.plans.scan', 'scan', ({(14, 24, 14, 29): '[det]', (14, 31, 14, 36): 'motor', (14, 38, 14, 40): '(-1)', (14, 42, 14, 43): '(1)', (14, 45, 14, 47): '(10)'}, {}), '([det], motor, -1, 1, 10)', False, 'from bluesky.plans import scan\n'), ((25, 9, 25, 34), 'pytest.warns', 'pytest.warns', ({(25, 22, 25, 33): 'UserWarning'}, {}), '(UserWarning)', False, 'import pytest\n'), ((27, 9, 27, 34), 'pytest.warns', 'pytest.warns', ({(27, 22, 27, 33): 'UserWarning'}, {}), '(UserWarning)', False, 'import pytest\n'), ((29, 9, 29, 34), 'pytest.warns', 'pytest.warns', ({(29, 22, 29, 33): 'UserWarning'}, {}), '(UserWarning)', False, 'import pytest\n'), ((30, 15, 30, 75), 'bluesky.plans.grid_scan', 'grid_scan', ({(30, 25, 30, 30): '[det]', (30, 32, 30, 38): 'motor1', (30, 40, 30, 42): '-5', (30, 44, 30, 45): '5', (30, 47, 30, 49): '10', (30, 51, 30, 57): 'motor2', (30, 59, 30, 61): '-7', (30, 63, 30, 64): '7', (30, 66, 30, 68): '15', (30, 70, 30, 74): 'True'}, {}), '([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)', False, 'from bluesky.plans import grid_scan\n'), ((31, 8, 31, 65), 'bluesky.plan_tools.plot_raster_path', 'plot_raster_path', (), '', False, 'from bluesky.plan_tools import print_summary, print_summary_wrapper, plot_raster_path\n'), ((49, 17, 49, 45), 'bluesky.plans.scan', 'scan', ({(49, 22, 49, 27): '[det]', (49, 29, 49, 34): 'motor', (49, 36, 49, 38): '(-1)', (49, 40, 49, 41): '(1)', (49, 43, 49, 44): '(3)'}, {}), '([det], motor, -1, 1, 3)', False, 'from bluesky.plans import scan\n'), ((53, 17, 53, 45), 'bluesky.plans.scan', 'scan', ({(53, 22, 53, 27): '[det]', (53, 29, 53, 34): 'motor', (53, 36, 53, 38): '(-3)', (53, 40, 53, 41): '(3)', (53, 43, 53, 44): '(3)'}, {}), '([det], motor, -3, 3, 3)', False, 'from bluesky.plans import scan\n'), ((58, 17, 58, 45), 'bluesky.plans.scan', 'scan', ({(58, 22, 58, 27): '[det]', (58, 29, 58, 34): 'motor', (58, 36, 58, 38): '(-1)', (58, 40, 58, 41): '(1)', (58, 43, 58, 44): '(3)'}, {}), '([det], motor, -1, 1, 3)', False, 'from bluesky.plans import scan\n'), ((62, 9, 62, 36), 'pytest.raises', 'pytest.raises', ({(62, 23, 62, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((63, 8, 63, 24), 'bluesky.simulators.check_limits', 'check_limits', ({(63, 21, 63, 23): '[]'}, {}), '([])', False, 'from bluesky.simulators import print_summary, print_summary_wrapper, summarize_plan, check_limits, plot_raster_path\n'), ((15, 31, 15, 60), 'bluesky.plans.scan', 'scan', ({(15, 36, 15, 41): '[det]', (15, 43, 15, 48): 'motor', (15, 50, 15, 52): '(-1)', (15, 54, 15, 55): '(1)', (15, 57, 15, 59): '(10)'}, {}), '([det], motor, -1, 1, 10)', False, 'from bluesky.plans import 
scan\n'), ((26, 22, 26, 51), 'bluesky.plans.scan', 'scan', ({(26, 27, 26, 32): '[det]', (26, 34, 26, 39): 'motor', (26, 41, 26, 43): '(-1)', (26, 45, 26, 46): '(1)', (26, 48, 26, 50): '(10)'}, {}), '([det], motor, -1, 1, 10)', False, 'from bluesky.plans import scan\n'), ((28, 35, 28, 64), 'bluesky.plans.scan', 'scan', ({(28, 40, 28, 45): '[det]', (28, 47, 28, 52): 'motor', (28, 54, 28, 56): '(-1)', (28, 58, 28, 59): '(1)', (28, 61, 28, 63): '(10)'}, {}), '([det], motor, -1, 1, 10)', False, 'from bluesky.plans import scan\n')] |
robi1467/shut-the-box | shutTheBox/main.py | ed1a8f13bc74caa63361453e723768a9cbe1dac4 | import random
numbers_list = [1,2,3,4,5,6,7,8,9,10]
game_won = False
game_completed = False
#Stats
games_played = 0
games_won = 0
games_lost = 0
average_score = 0
total_score = 0
def welcome():
welcome_message = "Welcome to shut the box"
print(welcome_message)
i = 0
result = ""
while i < len(numbers_list):
if i < len(numbers_list)-1:
result += str(numbers_list[i]) + " "
else:
result += str(numbers_list[i])
i+=1
print(result)
def dice_roll(amount):
total = 0
i = 0
while i < amount:
total += random.randint(1, 6)
i+=1
return total
def choose_dice_amount():
amount = 0
while True:
try:
amount = int(input("You choose to roll one or two dice. Please enter either '1' or '2': "))
except ValueError:
print("INVALID ENTRY PLEASE TRY AGAIN")
continue
if amount == 1 or amount == 2:
return amount
else:
print("INVALID ENTRY PLEASE TRY AGAIN!")
continue
return amount
def choose_number_to_drop(target_amount):
entered = 0
goal = target_amount
entered_numbers = list()
while goal != 0:
try:
print("Available numbers: " + str(numbers_list) + " to get to " + str(target_amount))
entered = int(input("Please enter a number that is available: "))
except ValueError:
print("Invalid Entry, please try again")
continue
if entered not in numbers_list or entered in entered_numbers:
print("Invalid Entry, please try again")
continue
else:
goal -= entered
entered_numbers.append(entered)
if goal < 0:
goal = target_amount
entered_numbers = list()
i = 0
while i < len(entered_numbers):
numbers_list.remove(entered_numbers[i])
i += 1
def check_lost_game(rolled):
    # True means the game is lost: no combination of up to four of the
    # remaining numbers adds up to the rolled total.
value = True
if rolled not in numbers_list:
i = 0
while i < len(numbers_list):
j = i+1
while j< len(numbers_list):
if numbers_list[i] + numbers_list[j] == rolled:
return False
k = j+1
while k < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] == rolled:
return False
l = k+1
while l < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] + numbers_list[l] == rolled:
return False
l+=1
k+=1
j+=1
i +=1
else:
value = False
return value
def end_game():
game_completed = True
return game_completed
def win_game():
game_won = True
return game_won
def score_game():
score = 0
i = 0
while i < len(numbers_list):
score += numbers_list[i]
i+=1
return score
def all_less_than_7():
less_than_7 = True
i = 0
while i < len(numbers_list):
if numbers_list[i] > 6:
less_than_7 = False
i += 1
return less_than_7
def keep_playing_input():
while True:
try:
continue_playing = (input("Do you wish to keep playing? y or n: "))
except ValueError:
print("Invalid choice; please try again")
continue
        if continue_playing.lower() == "y":
            return True
        else:
            return False
keep_playing = True
while keep_playing:
numbers_list = [1,2,3,4,5,6,7,8,9,10]
welcome()
roll_total = 0
while roll_total < 55:
dice_amount = 2
if all_less_than_7():
dice_amount = choose_dice_amount()
dice_total = dice_roll(dice_amount)
print("Your roll is: " + str(dice_total))
if check_lost_game(dice_total):
print("It is impossible to continue the game with this roll")
break
choose_number_to_drop(dice_total)
roll_total += dice_total
if roll_total == 55:
game_won = win_game()
if game_won:
print("Congrats you won!!!!")
games_played +=1
games_won +=1
else:
print("You lose, your score is " + str(score_game()))
print("Numbers remaining: " + str(numbers_list))
games_played += 1
games_lost += 1
total_score += score_game()
average_score = total_score/games_played
game_won = False
print("STATS:\n Games Played: " + str(games_played) + "\nGames Won: " + str(games_won) + "\nGames Lost: " + str(games_lost)
+ "\nAverage Score: " + str(average_score) + "\nTotal Score: " + str(total_score))
    keep_playing = keep_playing_input()
| [((29, 17, 29, 37), 'random.randint', 'random.randint', ({(29, 32, 29, 33): '(1)', (29, 35, 29, 36): '(6)'}, {}), '(1, 6)', False, 'import random\n')] |
sm00th/leapp-repository | repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts_selinux.py | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | import warnings
import pytest
from leapp.libraries.actor.systemfacts import get_selinux_status
from leapp.models import SELinuxFacts
no_selinux = False
try:
import selinux
except ImportError:
no_selinux = True
warnings.warn(
'Tests which uses `selinux` will be skipped'
' due to library unavailability.', ImportWarning)
reason_to_skip_msg = "Selinux is not available"
# FIXME: create valid tests...
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_enabled_enforcing(monkeypatch):
"""
Test case SELinux is enabled in enforcing mode
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 1])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': True,
'enabled': True,
'runtime_mode': 'enforcing',
'static_mode': 'enforcing'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_enabled_permissive(monkeypatch):
"""
Test case SELinux is enabled in permissive mode
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': True,
'enabled': True,
'runtime_mode': 'permissive',
'static_mode': 'permissive'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_disabled(monkeypatch):
"""
Test case SELinux is disabled
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': False,
'enabled': False,
'runtime_mode': 'permissive',
'static_mode': 'permissive'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
class MockNoConfigFileOSError(object):
def __init__(self):
raise OSError
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_disabled_no_config_file(monkeypatch):
"""
Test case SELinux is disabled
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', MockNoConfigFileOSError)
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': False,
'enabled': False,
'runtime_mode': 'permissive',
'static_mode': 'disabled'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
| [((23, 1, 23, 58), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((41, 1, 41, 58), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((59, 1, 59, 58), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((82, 1, 82, 58), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((13, 4, 15, 57), 'warnings.warn', 'warnings.warn', ({(14, 8, 15, 41): '"""Tests which uses `selinux` will be skipped due to library unavailability."""', (15, 43, 15, 56): 'ImportWarning'}, {}), "(\n 'Tests which uses `selinux` will be skipped due to library unavailability.'\n , ImportWarning)", False, 'import warnings\n'), ((38, 11, 38, 40), 'leapp.models.SELinuxFacts', 'SELinuxFacts', ({}, {}), '(**expected_data)', False, 'from leapp.models import SELinuxFacts\n'), ((38, 44, 38, 64), 'leapp.libraries.actor.systemfacts.get_selinux_status', 'get_selinux_status', ({}, {}), '()', False, 'from leapp.libraries.actor.systemfacts import get_selinux_status\n'), ((56, 11, 56, 40), 'leapp.models.SELinuxFacts', 'SELinuxFacts', ({}, {}), '(**expected_data)', False, 'from leapp.models import SELinuxFacts\n'), ((56, 44, 56, 64), 'leapp.libraries.actor.systemfacts.get_selinux_status', 'get_selinux_status', ({}, {}), '()', False, 'from leapp.libraries.actor.systemfacts import get_selinux_status\n'), ((74, 11, 74, 40), 'leapp.models.SELinuxFacts', 'SELinuxFacts', ({}, {}), '(**expected_data)', False, 'from leapp.models import SELinuxFacts\n'), ((74, 44, 74, 64), 'leapp.libraries.actor.systemfacts.get_selinux_status', 'get_selinux_status', ({}, {}), '()', False, 'from leapp.libraries.actor.systemfacts import get_selinux_status\n'), ((98, 11, 98, 40), 'leapp.models.SELinuxFacts', 'SELinuxFacts', ({}, {}), '(**expected_data)', False, 'from leapp.models import SELinuxFacts\n'), ((98, 44, 98, 64), 'leapp.libraries.actor.systemfacts.get_selinux_status', 'get_selinux_status', ({}, {}), '()', False, 'from leapp.libraries.actor.systemfacts import get_selinux_status\n')] |
emetowinner/python-challenges | Phase-1/Python Basic 2/Day-24.py | 520da69da0f2632deb1e81136d2b62d40555a4aa | """
1. Write a Python program to reverse only the vowels of a given string.
Sample Output:
w3resuorce
Python
Perl
ASU
2. Write a Python program to check whether a given integer is a palindrome or not.
Note: An integer is a palindrome when it reads the same backward as forward. Negative numbers are not palindromic.
Sample Output:
False
True
False
3. Write a Python program to remove the duplicate elements of a given array of numbers such that each element appear only once and return the new length of the given array.
Sample Output:
5
4
4. Write a Python program to calculate the maximum profit from selling and buying values of stock. An array of numbers represent the stock prices in chronological order.
For example, given [8, 10, 7, 5, 7, 15], the function will return 10, since the buying value of the stock is 5 dollars and sell value is 15 dollars.
Sample Output:
10
7
0
5. Write a Python program to remove all instances of a given value from a given array of integers and find the length of the new array.
Sample Output:
6
0
5
0
6. Write a Python program to find the starting and ending position of a given value in a given array of integers, sorted in ascending order.
If the target is not found in the array, return [0, 0].
Input: [5, 7, 7, 8, 8, 8] target value = 8
Output: [0, 5]
Input: [1, 3, 6, 9, 13, 14] target value = 4
Output: [0, 0]
Sample Output:
[0, 5]
[0, 0]
7. The price of a given stock on each day is stored in an array.
Write a Python program to find the maximum profit in one transaction i.e., buy one and sell one share of the stock from the given price value of the said array. You cannot sell a stock before you buy one.
Input (Stock price of each day): [224, 236, 247, 258, 259, 225]
Output: 35
Explanation:
236 - 224 = 12
247 - 224 = 23
258 - 224 = 34
259 - 224 = 35
225 - 224 = 1
247 - 236 = 11
258 - 236 = 22
259 - 236 = 23
225 - 236 = -11
258 - 247 = 11
259 - 247 = 12
225 - 247 = -22
259 - 258 = 1
225 - 258 = -33
225 - 259 = -34
8. Write a Python program to print a given N by M matrix of numbers line by line in forward > backwards > forward >... order.
Input matrix:
[[1, 2, 3,4],
[5, 6, 7, 8],
[0, 6, 2, 8],
[2, 3, 0, 2]]
Output:
1
2
3
4
8
7
6
5
0
6
2
8
2
0
3
2
9. Write a Python program to compute the largest product of three integers from a given list of integers.
Sample Output:
4000
8
120
10. Write a Python program to find the first missing positive integer that does not exist in a given list.
"""
| [] |
IMULMUL/etl-parser | etl/parsers/etw/Microsoft_Windows_IPxlatCfg.py | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-IPxlatCfg
GUID : 3e5ac668-af52-4c15-b99b-a3e7a6616ebd
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1001, version=0)
class Microsoft_Windows_IPxlatCfg_1001_0(Etw):
pattern = Struct(
"ErrorString" / CString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1002, version=0)
class Microsoft_Windows_IPxlatCfg_1002_0(Etw):
pattern = Struct(
"ErrorString" / CString,
"ErrorCode" / Int32ul,
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1003, version=0)
class Microsoft_Windows_IPxlatCfg_1003_0(Etw):
pattern = Struct(
"InfoString" / CString
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1005, version=0)
class Microsoft_Windows_IPxlatCfg_1005_0(Etw):
pattern = Struct(
"IPv4Address" / Int32ul,
"IPv4Prefix" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1006, version=0)
class Microsoft_Windows_IPxlatCfg_1006_0(Etw):
pattern = Struct(
"InfoString" / CString,
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1007, version=0)
class Microsoft_Windows_IPxlatCfg_1007_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"PrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1008, version=0)
class Microsoft_Windows_IPxlatCfg_1008_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"IPv4Address" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1009, version=0)
class Microsoft_Windows_IPxlatCfg_1009_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1010, version=0)
class Microsoft_Windows_IPxlatCfg_1010_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1011, version=0)
class Microsoft_Windows_IPxlatCfg_1011_0(Etw):
pattern = Struct(
"InfoString" / CString,
"MTU" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1101, version=0)
class Microsoft_Windows_IPxlatCfg_1101_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"Metric" / Int32ul,
"RemotePrefixLength" / Int32ul,
"LocalPrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1102, version=0)
class Microsoft_Windows_IPxlatCfg_1102_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"Metric" / Int32ul,
"RemotePrefixLength" / Int32ul,
"LocalPrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1103, version=0)
class Microsoft_Windows_IPxlatCfg_1103_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"PrefixLength" / Int32ul
)
| [((14, 14, 17, 5), 'construct.Struct', 'Struct', ({(15, 8, 15, 31): "'ErrorString' / CString", (16, 8, 16, 29): "'ErrorCode' / Int32ul"}, {}), "('ErrorString' / CString, 'ErrorCode' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((22, 14, 26, 5), 'construct.Struct', 'Struct', ({(23, 8, 23, 31): "'ErrorString' / CString", (24, 8, 24, 29): "'ErrorCode' / Int32ul", (25, 8, 25, 33): "'InterfaceLuid' / Int64ul"}, {}), "('ErrorString' / CString, 'ErrorCode' / Int32ul, 'InterfaceLuid' /\n Int64ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((31, 14, 33, 5), 'construct.Struct', 'Struct', ({(32, 8, 32, 30): "'InfoString' / CString"}, {}), "('InfoString' / CString)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((38, 14, 41, 5), 'construct.Struct', 'Struct', ({(39, 8, 39, 31): "'IPv4Address' / Int32ul", (40, 8, 40, 30): "'IPv4Prefix' / Int32ul"}, {}), "('IPv4Address' / Int32ul, 'IPv4Prefix' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((46, 14, 49, 5), 'construct.Struct', 'Struct', ({(47, 8, 47, 30): "'InfoString' / CString", (48, 8, 48, 33): "'InterfaceLuid' / Int64ul"}, {}), "('InfoString' / CString, 'InterfaceLuid' / Int64ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((54, 14, 57, 5), 'construct.Struct', 'Struct', ({(55, 8, 55, 33): "'InterfaceLuid' / Int64ul", (56, 8, 56, 32): "'PrefixLength' / Int32ul"}, {}), "('InterfaceLuid' / Int64ul, 'PrefixLength' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((62, 14, 65, 5), 'construct.Struct', 'Struct', ({(63, 8, 63, 33): "'InterfaceLuid' / Int64ul", (64, 8, 64, 31): "'IPv4Address' / Int32ul"}, {}), "('InterfaceLuid' / Int64ul, 'IPv4Address' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((70, 14, 72, 5), 'construct.Struct', 'Struct', ({(71, 8, 71, 33): "'InterfaceLuid' / Int64ul"}, {}), "('InterfaceLuid' / Int64ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((77, 14, 79, 5), 'construct.Struct', 'Struct', ({(78, 8, 78, 33): "'InterfaceLuid' / Int64ul"}, {}), "('InterfaceLuid' / Int64ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((84, 14, 87, 5), 'construct.Struct', 'Struct', ({(85, 8, 85, 30): "'InfoString' / CString", (86, 8, 86, 23): "'MTU' / Int32ul"}, {}), "('InfoString' / CString, 'MTU' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((92, 14, 97, 5), 'construct.Struct', 'Struct', ({(93, 8, 93, 33): "'InterfaceLuid' / Int64ul", (94, 8, 94, 26): "'Metric' / Int32ul", (95, 8, 95, 38): "'RemotePrefixLength' / Int32ul", (96, 8, 96, 37): "'LocalPrefixLength' / Int32ul"}, {}), "('InterfaceLuid' / Int64ul, 'Metric' / Int32ul, 'RemotePrefixLength' /\n Int32ul, 
'LocalPrefixLength' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((102, 14, 107, 5), 'construct.Struct', 'Struct', ({(103, 8, 103, 33): "'InterfaceLuid' / Int64ul", (104, 8, 104, 26): "'Metric' / Int32ul", (105, 8, 105, 38): "'RemotePrefixLength' / Int32ul", (106, 8, 106, 37): "'LocalPrefixLength' / Int32ul"}, {}), "('InterfaceLuid' / Int64ul, 'Metric' / Int32ul, 'RemotePrefixLength' /\n Int32ul, 'LocalPrefixLength' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((112, 14, 115, 5), 'construct.Struct', 'Struct', ({(113, 8, 113, 33): "'InterfaceLuid' / Int64ul", (114, 8, 114, 32): "'PrefixLength' / Int32ul"}, {}), "('InterfaceLuid' / Int64ul, 'PrefixLength' / Int32ul)", False, 'from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\n'), ((12, 14, 12, 58), 'etl.parsers.etw.core.guid', 'guid', ({(12, 19, 12, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((20, 14, 20, 58), 'etl.parsers.etw.core.guid', 'guid', ({(20, 19, 20, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((29, 14, 29, 58), 'etl.parsers.etw.core.guid', 'guid', ({(29, 19, 29, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((36, 14, 36, 58), 'etl.parsers.etw.core.guid', 'guid', ({(36, 19, 36, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((44, 14, 44, 58), 'etl.parsers.etw.core.guid', 'guid', ({(44, 19, 44, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((52, 14, 52, 58), 'etl.parsers.etw.core.guid', 'guid', ({(52, 19, 52, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((60, 14, 60, 58), 'etl.parsers.etw.core.guid', 'guid', ({(60, 19, 60, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((68, 14, 68, 58), 'etl.parsers.etw.core.guid', 'guid', ({(68, 19, 68, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((75, 14, 75, 58), 'etl.parsers.etw.core.guid', 'guid', ({(75, 19, 75, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((82, 14, 82, 58), 'etl.parsers.etw.core.guid', 'guid', ({(82, 19, 82, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((90, 14, 90, 58), 'etl.parsers.etw.core.guid', 'guid', ({(90, 19, 90, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), 
"('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((100, 14, 100, 58), 'etl.parsers.etw.core.guid', 'guid', ({(100, 19, 100, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n'), ((110, 14, 110, 58), 'etl.parsers.etw.core.guid', 'guid', ({(110, 19, 110, 57): '"""3e5ac668-af52-4c15-b99b-a3e7a6616ebd"""'}, {}), "('3e5ac668-af52-4c15-b99b-a3e7a6616ebd')", False, 'from etl.parsers.etw.core import Etw, declare, guid\n')] |
Levakin/sanic-test-app | microservices/users/config.py | d96a54a21f6d0d3b262bbc7bc75f5fa3b12c3b61 | # -*- coding: utf-8 -*-
import os
from distutils.util import strtobool
class Config:
DEBUG = bool(strtobool(os.getenv('DEBUG', "False")))
DATABASE_URI = os.getenv('DATABASE_URI', '127.0.0.1:27017')
WORKERS = int(os.getenv('WORKERS', 2))
LOGO = os.getenv('LOGO', None)
HOST = os.getenv('HOST', '127.0.0.1')
PORT = int(os.getenv('PORT', 8000))
SECRET = os.getenv('SECRET', 'secret')
LOGIN_MIN_LENGTH = int(os.getenv('LOGIN_MIN_LENGTH', 1))
LOGIN_MAX_LENGTH = int(os.getenv('LOGIN_MAX_LENGTH', 32))
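
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged guess at how these settings are consumed elsewhere in the service,
# e.g. when creating the Sanic app (the app name is hypothetical):
#
#     from sanic import Sanic
#     app = Sanic('users')
#     app.run(host=Config.HOST, port=Config.PORT,
#             workers=Config.WORKERS, debug=Config.DEBUG)
if __name__ == '__main__':
    # Runnable sanity check of the resolved configuration values.
    print(Config.HOST, Config.PORT, Config.WORKERS, Config.DEBUG)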
| [((9, 19, 9, 63), 'os.getenv', 'os.getenv', ({(9, 29, 9, 43): '"""DATABASE_URI"""', (9, 45, 9, 62): '"""127.0.0.1:27017"""'}, {}), "('DATABASE_URI', '127.0.0.1:27017')", False, 'import os\n'), ((11, 11, 11, 34), 'os.getenv', 'os.getenv', ({(11, 21, 11, 27): '"""LOGO"""', (11, 29, 11, 33): 'None'}, {}), "('LOGO', None)", False, 'import os\n'), ((12, 11, 12, 41), 'os.getenv', 'os.getenv', ({(12, 21, 12, 27): '"""HOST"""', (12, 29, 12, 40): '"""127.0.0.1"""'}, {}), "('HOST', '127.0.0.1')", False, 'import os\n'), ((14, 13, 14, 42), 'os.getenv', 'os.getenv', ({(14, 23, 14, 31): '"""SECRET"""', (14, 33, 14, 41): '"""secret"""'}, {}), "('SECRET', 'secret')", False, 'import os\n'), ((10, 18, 10, 41), 'os.getenv', 'os.getenv', ({(10, 28, 10, 37): '"""WORKERS"""', (10, 39, 10, 40): '2'}, {}), "('WORKERS', 2)", False, 'import os\n'), ((13, 15, 13, 38), 'os.getenv', 'os.getenv', ({(13, 25, 13, 31): '"""PORT"""', (13, 33, 13, 37): '8000'}, {}), "('PORT', 8000)", False, 'import os\n'), ((15, 27, 15, 59), 'os.getenv', 'os.getenv', ({(15, 37, 15, 55): '"""LOGIN_MIN_LENGTH"""', (15, 57, 15, 58): '1'}, {}), "('LOGIN_MIN_LENGTH', 1)", False, 'import os\n'), ((16, 27, 16, 60), 'os.getenv', 'os.getenv', ({(16, 37, 16, 55): '"""LOGIN_MAX_LENGTH"""', (16, 57, 16, 59): '32'}, {}), "('LOGIN_MAX_LENGTH', 32)", False, 'import os\n'), ((8, 27, 8, 54), 'os.getenv', 'os.getenv', ({(8, 37, 8, 44): '"""DEBUG"""', (8, 46, 8, 53): '"""False"""'}, {}), "('DEBUG', 'False')", False, 'import os\n')] |
Temurson/semantic | semantic-python/test/fixtures/4-01-lambda-literals.py | 2e9cd2c006cec9a0328791e47d8c6d60af6d5a1b | # CHECK-TREE: { const <- \x -> \y -> x; y <- const #true #true; z <- const #false #false; #record { const: const, y : y, z: z, }}
const = lambda x, y: x
y = const(True, True)
z = const(False, False)
| [] |
mithi/semantic-segmentation | main.py | 85e9df04397745e0c6ab252e30991fa9b514ec1a | import tensorflow as tf
import os.path
import warnings
from distutils.version import LooseVersion
import glob
import helper
import project_tests as tests
#--------------------------
# USER-SPECIFIED DATA
#--------------------------
# Tune these parameters
NUMBER_OF_CLASSES = 2
IMAGE_SHAPE = (160, 576)
EPOCHS = 20
BATCH_SIZE = 1
LEARNING_RATE = 0.0001
DROPOUT = 0.75
# Specify these directory paths
DATA_DIRECTORY = './data'
RUNS_DIRECTORY = './runs'
TRAINING_DATA_DIRECTORY ='./data/data_road/training'
NUMBER_OF_IMAGES = len(glob.glob('./data/data_road/training/calib/*.*'))
VGG_PATH = './data/vgg'
all_training_losses = [] # Used for plotting to visualize if our training is going well given parameters
#--------------------------
# DEPENDENCY CHECK
#--------------------------
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
#--------------------------
# PLACEHOLDER TENSORS
#--------------------------
correct_label = tf.placeholder(tf.float32, [None, IMAGE_SHAPE[0], IMAGE_SHAPE[1], NUMBER_OF_CLASSES])
learning_rate = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32)
#--------------------------
# FUNCTIONS
#--------------------------
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
sess: TensorFlow Session
vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3, layer4, layer7)
"""
# load the model and weights
model = tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
# Get Tensors to be returned from graph
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name('image_input:0')
keep_prob = graph.get_tensor_by_name('keep_prob:0')
layer3 = graph.get_tensor_by_name('layer3_out:0')
layer4 = graph.get_tensor_by_name('layer4_out:0')
layer7 = graph.get_tensor_by_name('layer7_out:0')
return image_input, keep_prob, layer3, layer4, layer7
def conv_1x1(layer, layer_name):
""" Return the output of a 1x1 convolution of a layer """
return tf.layers.conv2d(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (1, 1),
strides = (1, 1),
name = layer_name)
def upsample(layer, k, s, layer_name):
""" Return the output of transpose convolution given kernel_size k and strides s """
return tf.layers.conv2d_transpose(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (k, k),
strides = (s, s),
padding = 'same',
name = layer_name)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes = NUMBER_OF_CLASSES):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
vgg_layerX_out: TF Tensor for VGG Layer X output
num_classes: Number of classes to classify
return: The Tensor for the last layer of output
"""
# Use a shorter variable name for simplicity
layer3, layer4, layer7 = vgg_layer3_out, vgg_layer4_out, vgg_layer7_out
# Apply a 1x1 convolution to encoder layers
layer3x = conv_1x1(layer = layer3, layer_name = "layer3conv1x1")
layer4x = conv_1x1(layer = layer4, layer_name = "layer4conv1x1")
layer7x = conv_1x1(layer = layer7, layer_name = "layer7conv1x1")
# Add decoder layers to the network with skip connections and upsampling
# Note: the kernel size and strides are the same as the example in Udacity Lectures
# Semantic Segmentation Scene Understanding Lesson 10-9: FCN-8 - Decoder
decoderlayer1 = upsample(layer = layer7x, k = 4, s = 2, layer_name = "decoderlayer1")
decoderlayer2 = tf.add(decoderlayer1, layer4x, name = "decoderlayer2")
decoderlayer3 = upsample(layer = decoderlayer2, k = 4, s = 2, layer_name = "decoderlayer3")
decoderlayer4 = tf.add(decoderlayer3, layer3x, name = "decoderlayer4")
decoderlayer_output = upsample(layer = decoderlayer4, k = 16, s = 8, layer_name = "decoderlayer_output")
return decoderlayer_output
def optimize(nn_last_layer, correct_label, learning_rate, num_classes = NUMBER_OF_CLASSES):
"""
Build the TensorFLow loss and optimizer operations.
nn_last_layer: TF Tensor of the last layer in the neural network
correct_label: TF Placeholder for the correct label image
learning_rate: TF Placeholder for the learning rate
num_classes: Number of classes to classify
return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# Reshape 4D tensors to 2D, each row represents a pixel, each column a class
logits = tf.reshape(nn_last_layer, (-1, num_classes))
class_labels = tf.reshape(correct_label, (-1, num_classes))
# The cross_entropy_loss is the cost which we are trying to minimize to yield higher accuracy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = class_labels)
cross_entropy_loss = tf.reduce_mean(cross_entropy)
# The model implements this operation to find the weights/parameters that would yield correct pixel labels
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
sess: TF Session
epochs: Number of epochs
batch_size: Batch size
get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
train_op: TF Operation to train the neural network
cross_entropy_loss: TF Tensor for the amount of loss
input_image: TF Placeholder for input images
correct_label: TF Placeholder for label images
keep_prob: TF Placeholder for dropout keep probability
learning_rate: TF Placeholder for learning rate
"""
for epoch in range(EPOCHS):
losses, i = [], 0
for images, labels in get_batches_fn(BATCH_SIZE):
i += 1
feed = { input_image: images,
correct_label: labels,
keep_prob: DROPOUT,
learning_rate: LEARNING_RATE }
_, partial_loss = sess.run([train_op, cross_entropy_loss], feed_dict = feed)
print("---> iteration: ", i, " partial loss:", partial_loss)
losses.append(partial_loss)
training_loss = sum(losses) / len(losses)
all_training_losses.append(training_loss)
print("------------------")
print("epoch: ", epoch + 1, " of ", EPOCHS, "training loss: ", training_loss)
print("------------------")
def run_tests():
tests.test_layers(layers)
tests.test_optimize(optimize)
tests.test_for_kitti_dataset(DATA_DIRECTORY)
tests.test_train_nn(train_nn)
def run():
""" Run a train a model and save output images resulting from the test image fed on the trained model """
# Get vgg model if we can't find it where it should be
helper.maybe_download_pretrained_vgg(DATA_DIRECTORY)
# A function to get batches
get_batches_fn = helper.gen_batch_function(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)
with tf.Session() as session:
# Returns the three layers, keep probability and input layer from the vgg architecture
image_input, keep_prob, layer3, layer4, layer7 = load_vgg(session, VGG_PATH)
# The resulting network architecture from adding a decoder on top of the given vgg model
model_output = layers(layer3, layer4, layer7, NUMBER_OF_CLASSES)
# Returns the output logits, training operation and cost operation to be used
# - logits: each row represents a pixel, each column a class
# - train_op: function used to get the right parameters to the model to correctly label the pixels
# - cross_entropy_loss: function outputting the cost which we are minimizing, lower cost should yield higher accuracy
logits, train_op, cross_entropy_loss = optimize(model_output, correct_label, learning_rate, NUMBER_OF_CLASSES)
# Initialize all variables
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
# Train the neural network
train_nn(session, EPOCHS, BATCH_SIZE, get_batches_fn,
train_op, cross_entropy_loss, image_input,
correct_label, keep_prob, learning_rate)
# Run the model with the test images and save each painted output image (roads painted green)
helper.save_inference_samples(RUNS_DIRECTORY, DATA_DIRECTORY, session, IMAGE_SHAPE, logits, keep_prob, image_input)
#--------------------------
# MAIN
#--------------------------
if __name__ == "__main__":
run_tests()
  run() # Train a model and save output images produced by running the trained model on the test images
print(all_training_losses)
| [((54, 16, 54, 101), 'tensorflow.placeholder', 'tf.placeholder', ({(54, 31, 54, 41): 'tf.float32', (54, 43, 54, 100): '[None, IMAGE_SHAPE[0], IMAGE_SHAPE[1], NUMBER_OF_CLASSES]'}, {}), '(tf.float32, [None, IMAGE_SHAPE[0], IMAGE_SHAPE[1],\n NUMBER_OF_CLASSES])', True, 'import tensorflow as tf\n'), ((55, 16, 55, 42), 'tensorflow.placeholder', 'tf.placeholder', ({(55, 31, 55, 41): 'tf.float32'}, {}), '(tf.float32)', True, 'import tensorflow as tf\n'), ((56, 12, 56, 38), 'tensorflow.placeholder', 'tf.placeholder', ({(56, 27, 56, 37): 'tf.float32'}, {}), '(tf.float32)', True, 'import tensorflow as tf\n'), ((30, 23, 30, 71), 'glob.glob', 'glob.glob', ({(30, 33, 30, 70): '"""./data/data_road/training/calib/*.*"""'}, {}), "('./data/data_road/training/calib/*.*')", False, 'import glob\n'), ((40, 7, 40, 35), 'distutils.version.LooseVersion', 'LooseVersion', ({(40, 20, 40, 34): 'tf.__version__'}, {}), '(tf.__version__)', False, 'from distutils.version import LooseVersion\n'), ((40, 39, 40, 58), 'distutils.version.LooseVersion', 'LooseVersion', ({(40, 52, 40, 57): '"""1.0"""'}, {}), "('1.0')", False, 'from distutils.version import LooseVersion\n'), ((45, 7, 45, 32), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((46, 2, 46, 79), 'warnings.warn', 'warnings.warn', ({(46, 16, 46, 78): '"""No GPU found. Please use a GPU to train your neural network."""'}, {}), "('No GPU found. Please use a GPU to train your neural network.')", False, 'import warnings\n'), ((71, 10, 71, 63), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', ({(71, 37, 71, 41): 'sess', (71, 43, 71, 52): "['vgg16']", (71, 54, 71, 62): 'vgg_path'}, {}), "(sess, ['vgg16'], vgg_path)", True, 'import tensorflow as tf\n'), ((74, 10, 74, 32), 'tensorflow.get_default_graph', 'tf.get_default_graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((86, 9, 90, 44), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (), '', True, 'import tensorflow as tf\n'), ((95, 9, 100, 54), 'tensorflow.layers.conv2d_transpose', 'tf.layers.conv2d_transpose', (), '', True, 'import tensorflow as tf\n'), ((123, 18, 123, 72), 'tensorflow.add', 'tf.add', (), '', True, 'import tensorflow as tf\n'), ((125, 18, 125, 72), 'tensorflow.add', 'tf.add', (), '', True, 'import tensorflow as tf\n'), ((141, 11, 141, 55), 'tensorflow.reshape', 'tf.reshape', ({(141, 22, 141, 35): 'nn_last_layer', (141, 37, 141, 54): '(-1, num_classes)'}, {}), '(nn_last_layer, (-1, num_classes))', True, 'import tensorflow as tf\n'), ((142, 17, 142, 61), 'tensorflow.reshape', 'tf.reshape', ({(142, 28, 142, 41): 'correct_label', (142, 43, 142, 60): '(-1, num_classes)'}, {}), '(correct_label, (-1, num_classes))', True, 'import tensorflow as tf\n'), ((145, 18, 145, 97), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (), '', True, 'import tensorflow as tf\n'), ((146, 23, 146, 52), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(146, 38, 146, 51): 'cross_entropy'}, {}), '(cross_entropy)', True, 'import tensorflow as tf\n'), ((198, 2, 198, 27), 'project_tests.test_layers', 'tests.test_layers', ({(198, 20, 198, 26): 'layers'}, {}), '(layers)', True, 'import project_tests as tests\n'), ((199, 2, 199, 31), 'project_tests.test_optimize', 'tests.test_optimize', ({(199, 22, 199, 30): 'optimize'}, {}), '(optimize)', True, 'import project_tests as tests\n'), ((200, 2, 200, 46), 'project_tests.test_for_kitti_dataset', 'tests.test_for_kitti_dataset', ({(200, 31, 200, 45): 
'DATA_DIRECTORY'}, {}), '(DATA_DIRECTORY)', True, 'import project_tests as tests\n'), ((201, 2, 201, 31), 'project_tests.test_train_nn', 'tests.test_train_nn', ({(201, 22, 201, 30): 'train_nn'}, {}), '(train_nn)', True, 'import project_tests as tests\n'), ((208, 2, 208, 54), 'helper.maybe_download_pretrained_vgg', 'helper.maybe_download_pretrained_vgg', ({(208, 39, 208, 53): 'DATA_DIRECTORY'}, {}), '(DATA_DIRECTORY)', False, 'import helper\n'), ((211, 19, 211, 82), 'helper.gen_batch_function', 'helper.gen_batch_function', ({(211, 45, 211, 68): 'TRAINING_DATA_DIRECTORY', (211, 70, 211, 81): 'IMAGE_SHAPE'}, {}), '(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)', False, 'import helper\n'), ((213, 7, 213, 19), 'tensorflow.Session', 'tf.Session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((237, 4, 237, 119), 'helper.save_inference_samples', 'helper.save_inference_samples', ({(237, 34, 237, 48): 'RUNS_DIRECTORY', (237, 50, 237, 64): 'DATA_DIRECTORY', (237, 66, 237, 73): 'session', (237, 75, 237, 86): 'IMAGE_SHAPE', (237, 88, 237, 94): 'logits', (237, 96, 237, 105): 'keep_prob', (237, 107, 237, 118): 'image_input'}, {}), '(RUNS_DIRECTORY, DATA_DIRECTORY, session,\n IMAGE_SHAPE, logits, keep_prob, image_input)', False, 'import helper\n'), ((48, 40, 48, 65), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((149, 13, 149, 50), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ({(149, 36, 149, 49): 'learning_rate'}, {}), '(learning_rate)', True, 'import tensorflow as tf\n'), ((228, 16, 228, 49), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((229, 16, 229, 48), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n')] |
BrunoReboul/forseti-security | tests/scanner/audit/log_sink_rules_engine_test.py | 9d4a61b3e5a5d22a4330d15ddf61063fc9079071 | # Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the LogSinkRulesEngine."""
import unittest
import mock
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
from google.cloud.forseti.common.gcp_type.billing_account import BillingAccount
from google.cloud.forseti.common.gcp_type.folder import Folder
from google.cloud.forseti.common.gcp_type.log_sink import LogSink
from google.cloud.forseti.common.gcp_type.organization import Organization
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
class LogSinkRulesEngineTest(ForsetiTestCase):
"""Tests for the LogSinkRulesEngine."""
def setUp(self):
"""Set up GCP resources for tests."""
self.lsre = lsre
self.lsre.LOGGER = mock.MagicMock()
# Set up resources in the following hierarchy:
# +-----> billing_acct_abcd
# |
# |
# +-----------------------> proj-1
# |
# |
# org_234 +-----> folder_56 +-----> proj-2
# |
# |
# +-----------------------> proj-3
self.org_234 = Organization(
'234',
display_name='Organization 234',
full_name='organization/234/',
data='fake_org_data_234')
self.billing_acct_abcd = BillingAccount(
'ABCD-1234',
display_name='Billing Account ABCD',
full_name='organization/234/billingAccount/ABCD-1234/',
data='fake_billing_account_data_abcd')
self.folder_56 = Folder(
'56',
display_name='Folder 56',
full_name='organization/234/folder/56/',
data='fake_folder_data456456')
self.proj_1 = Project(
'proj-1',
project_number=11223344,
display_name='My project 1',
parent=self.org_234,
full_name='organization/234/project/proj-1/',
data='fake_project_data_2341')
self.proj_2 = Project(
'proj-2',
project_number=223344,
display_name='My project 2',
parent=self.folder_56,
full_name='organization/234/folder/56/project/proj-2/',
data='fake_project_data_4562')
self.proj_3 = Project(
'proj-3',
project_number=33445566,
display_name='My project 3',
parent=self.org_234,
full_name='organization/234/project/proj-3/',
data='fake_project_data_1233')
def get_engine_with_valid_rules(self):
"""Create a rule engine build with a valid rules file."""
rules_local_path = get_datafile_path(
__file__, 'log_sink_test_valid_rules.yaml')
rules_engine = self.lsre.LogSinkRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book()
return rules_engine
def test_build_rule_book_from_local_yaml_file_works(self):
"""Tests that a RuleBook is built correctly with a yaml file."""
rules_engine = self.get_engine_with_valid_rules()
# Creates 'self' rules for 5 difference resources and 'children' rules
# for 2.
self.assertEqual(
6, len(rules_engine.rule_book.resource_rules_map['self']))
self.assertEqual(
2, len(rules_engine.rule_book.resource_rules_map['children']))
self_rule_resources = []
for resource in rules_engine.rule_book.resource_rules_map['self']:
self_rule_resources.append(resource.name)
expected_rule_resources = [
'billingAccounts/ABCD-1234', 'folders/56', 'organizations/234',
'projects/proj-1', 'projects/proj-2', 'projects/proj-3']
self.assertEqual(expected_rule_resources, sorted(self_rule_resources))
child_rule_resources = []
for resource in rules_engine.rule_book.resource_rules_map['children']:
child_rule_resources.append(resource.name)
expected_rule_resources = ['folders/56', 'organizations/234']
self.assertEqual(expected_rule_resources, sorted(child_rule_resources))
def test_build_rule_book_invalid_applies_to_fails(self):
"""Tests that a rule with invalid applies_to type cannot be created."""
rules_local_path = get_datafile_path(
__file__, 'log_sink_test_invalid_rules.yaml')
rules_engine = self.lsre.LogSinkRulesEngine(
rules_file_path=rules_local_path)
with self.assertRaises(InvalidRulesSchemaError):
rules_engine.build_rule_book()
def test_project_with_no_violations(self):
"""Tests that no violations are produced for a correct project."""
rules_engine = self.get_engine_with_valid_rules()
# proj-1 needs an Audit Log sink.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_1_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:[email protected]',
parent=self.proj_1,
raw_json='_SINK_1_'
),
LogSink(
sink_id='compute_logs_saver',
destination=('bigquery.googleapis.com/projects/proj_1/'
'datasets/compute_logs'),
sink_filter='resource.type="gce_instance"',
include_children=False,
writer_identity=('serviceAccount:p12345-67890@'
'gcp-sa-logging.iam.gserviceaccount.com'),
parent=self.proj_1,
raw_json='_SINK_2_'
)
]
actual_violations = rules_engine.find_violations(
self.proj_1, log_sinks)
self.assertEqual(set(), actual_violations)
def test_folder_with_no_violations(self):
"""Tests that no violations are produced for a correct folder."""
rules_engine = self.get_engine_with_valid_rules()
# Rules disallow any folder-level LogSinks.
actual_violations = rules_engine.find_violations(self.folder_56, [])
self.assertEqual(set(), actual_violations)
def test_billing_account_with_no_violations(self):
"""Tests that no violations are produced for a correct billing acct."""
rules_engine = self.get_engine_with_valid_rules()
log_sinks = [
LogSink(
sink_id='billing_logs',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/billing_logs'),
sink_filter='',
include_children=False,
writer_identity='serviceAccount:[email protected]',
parent=self.billing_acct_abcd,
raw_json='__SINK_1__'
),
]
actual_violations = rules_engine.find_violations(
self.billing_acct_abcd, log_sinks)
self.assertEqual(set(), actual_violations)
def test_org_with_no_violations(self):
"""Tests that no violations are produced for a correct organization."""
rules_engine = self.get_engine_with_valid_rules()
# Org needs an Audit Log sink, but to any destination.
log_sinks = [
LogSink(
sink_id='audit_logs_to_pubsub',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:[email protected]',
parent=self.org_234,
raw_json='__SINK_1__'
)
]
actual_violations = rules_engine.find_violations(
self.org_234, log_sinks)
self.assertEqual(set(), actual_violations)
def test_project_missing_required_sinks(self):
"""Tests violations are produced for project missing required sinks."""
rules_engine = self.get_engine_with_valid_rules()
# proj-2 needs an Audit Log sink, by org-level rules, and a pubsub
# sink, by folder-level rules.
log_sinks = [
LogSink(
sink_id='non_audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_2_logs'),
sink_filter='logName:"logs/non-cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:[email protected]',
parent=self.proj_2,
raw_json='__SINK_1__'
),
LogSink(
sink_id='compute_logs_saver',
destination=('bigquery.googleapis.com/projects/proj_2/'
'datasets/compute_logs'),
sink_filter='resource.type="gce_instance"',
include_children=False,
writer_identity=('serviceAccount:p12345-67890@'
'gcp-sa-logging.iam.gserviceaccount.com'),
parent=self.proj_2,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.proj_2, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='proj-2',
resource_type='project',
resource_id='proj-2',
full_name='organization/234/folder/56/project/proj-2/',
rule_name='Require Audit Log sinks in all projects.',
rule_index=0,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('^bigquery\\.googleapis\\.com\\/projects\\/'
'my\\-audit\\-logs\\/datasets\\/.+$'),
sink_filter=('^logName\\:\\"logs\\/'
'cloudaudit\\.googleapis\\.com\\"$'),
sink_include_children='*',
resource_data=''
),
lsre.Rule.RuleViolation(
resource_name='proj-2',
resource_type='project',
resource_id='proj-2',
full_name='organization/234/folder/56/project/proj-2/',
rule_name='Require a PubSub sink in folder-56 projects.',
rule_index=3,
violation_type='LOG_SINK_VIOLATION',
sink_destination='^pubsub\\.googleapis\\.com\\/.+$',
sink_filter='^$',
sink_include_children='*',
resource_data=''
)
])
self.assertEqual(expected_violations, actual_violations)
def test_project_whitelist_violation(self):
"""Tests violations are produced for non-whitelisted sinks."""
rules_engine = self.get_engine_with_valid_rules()
# proj-3 can only have BigQuery sinks.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_1_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:[email protected]',
parent=self.proj_3,
raw_json='__SINK_1__'
),
LogSink(
sink_id='audit_logs_to_pubsub',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'proj-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:[email protected]',
parent=self.proj_3,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.proj_3, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='projects/proj-3/sinks/audit_logs_to_pubsub',
resource_type='sink',
resource_id='audit_logs_to_pubsub',
full_name='organization/234/project/proj-3/audit_logs_to_pubsub/',
rule_name='Only allow BigQuery sinks in Proj-1 and Proj-3.',
rule_index=4,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('pubsub.googleapis.com/projects/proj-3/'
'topics/proj-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
sink_include_children=True,
resource_data='__SINK_2__'
)
])
self.assertEqual(expected_violations, actual_violations)
def test_folder_blacklist_violation(self):
"""Tests violations are produced for blacklisted sinks."""
rules_engine = self.get_engine_with_valid_rules()
# Rules disallow any folder-level LogSinks.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/folder_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:[email protected]',
parent=self.folder_56,
raw_json='__SINK_1__'
)
]
actual_violations = rules_engine.find_violations(
self.folder_56, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='folders/56/sinks/audit_logs_to_bq',
resource_type='sink',
resource_id='audit_logs_to_bq',
full_name='organization/234/folder/56/audit_logs_to_bq/',
rule_name='Disallow folder sinks.',
rule_index=2,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('bigquery.googleapis.com/projects/'
'my-audit-logs/datasets/folder_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
sink_include_children=False,
resource_data='__SINK_1__')
])
self.assertEqual(expected_violations, actual_violations)
def test_billing_account_with_whitelist_violations(self):
"""Tests violations are produced for billing account sinks."""
rules_engine = self.get_engine_with_valid_rules()
log_sinks = [
LogSink(
sink_id='billing_logs',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/wrong_dataset'),
sink_filter='',
include_children=False,
writer_identity='serviceAccount:[email protected]',
parent=self.billing_acct_abcd,
raw_json='__SINK_1__'
),
]
actual_violations = rules_engine.find_violations(
self.billing_acct_abcd, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_type='sink',
resource_id='billing_logs',
resource_name='billingAccounts/ABCD-1234/sinks/billing_logs',
full_name='organization/234/billingAccount/ABCD-1234/billing_logs/',
rule_name=('Only allow Billing Account sinks to audit logs '
'project.'),
rule_index=6,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('bigquery.googleapis.com/projects/'
'my-audit-logs/datasets/wrong_dataset'),
sink_filter='',
sink_include_children=False,
resource_data='__SINK_1__')
])
self.assertEqual(expected_violations, actual_violations)
def test_org_missing_required_sinks(self):
"""Tests violations are produced for an org missing required sinks."""
rules_engine = self.get_engine_with_valid_rules()
# Org needs an Audit Log sink, including children.
log_sinks = [
LogSink(
sink_id='sink_not_including_children',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:[email protected]',
parent=self.org_234,
raw_json='__SINK_1__'
),
LogSink(
sink_id='sink_with_wrong_filter',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-more-logs'),
sink_filter='logName:"logs/otherapi.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:[email protected]',
parent=self.org_234,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.org_234, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='234',
resource_type='organization',
resource_id='234',
full_name='organization/234/',
rule_name='Require an Org Level audit log sink.',
rule_index=1,
violation_type='LOG_SINK_VIOLATION',
sink_destination='^.*$',
sink_filter=('^logName\\:\\"logs\\/'
'cloudaudit\\.googleapis\\.com\\"$'),
sink_include_children=True,
resource_data=''
)
])
self.assertEqual(expected_violations, actual_violations)
def test_add_invalid_rules(self):
"""Tests that adding invalid rules raises exceptions."""
rule_book = self.lsre.LogSinkRuleBook(global_configs=None)
valid_resource = {
'type': 'organization',
'applies_to': 'children',
'resource_ids': ['1234']
}
valid_sink_spec = {
'destination': 'bigquery.*',
'filter': '',
'include_children': '*'
}
rule_book.add_rule(
{
'name': 'Valid rule',
'resource': [valid_resource],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, 0)
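        # Each of the following rule definitions omits or misuses a required
        # field and should therefore be rejected by the rule book.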
bad_rules = [
{},
{
                'name': 'Missing Resource',
'mode': 'whitelist',
'sink': valid_sink_spec,
}, {
                'name': 'Missing sink',
'resource': [valid_resource],
'mode': 'whitelist',
}, {
'name': 'Bad mode',
'resource': [valid_resource],
'sink': valid_sink_spec,
'mode': 'other',
}, {
'name': 'Bad resource type',
'resource': [{
'type': 'bucket',
'applies_to': 'self',
'resource_ids': ['bucket-1']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Bad applies to type',
'resource': [{
'type': 'folder',
'applies_to': 'self_and_children',
'resource_ids': ['56']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Bad applies to type',
'resource': [{
'type': 'billing_account',
'applies_to': 'children',
'resource_ids': ['ABCD-1234']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Empty resource_ids',
'resource': [{
'type': 'project',
'applies_to': 'self',
'resource_ids': []
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Missing filter',
'resource': [valid_resource],
'sink': {
'destination': 'bigquery.*',
'include_children': '*'
},
'mode': 'whitelist'
}, {
'name': 'Bad include_children',
'resource': [valid_resource],
'sink': {
'destination': 'bigquery.*',
'filter': '*',
'include_children': 'Yes'
},
'mode': 'whitelist'
}
]
for rule in bad_rules:
with self.assertRaises(InvalidRulesSchemaError):
rule_book.add_rule(rule, 1)
if __name__ == '__main__':
unittest.main()
| [((547, 4, 547, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((37, 27, 37, 43), 'mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'import mock\n'), ((50, 23, 54, 37), 'google.cloud.forseti.common.gcp_type.organization.Organization', 'Organization', (), '', False, 'from google.cloud.forseti.common.gcp_type.organization import Organization\n'), ((56, 33, 60, 50), 'google.cloud.forseti.common.gcp_type.billing_account.BillingAccount', 'BillingAccount', (), '', False, 'from google.cloud.forseti.common.gcp_type.billing_account import BillingAccount\n'), ((62, 25, 66, 42), 'google.cloud.forseti.common.gcp_type.folder.Folder', 'Folder', (), '', False, 'from google.cloud.forseti.common.gcp_type.folder import Folder\n'), ((68, 22, 74, 42), 'google.cloud.forseti.common.gcp_type.project.Project', 'Project', (), '', False, 'from google.cloud.forseti.common.gcp_type.project import Project\n'), ((76, 22, 82, 42), 'google.cloud.forseti.common.gcp_type.project.Project', 'Project', (), '', False, 'from google.cloud.forseti.common.gcp_type.project import Project\n'), ((84, 22, 90, 42), 'google.cloud.forseti.common.gcp_type.project.Project', 'Project', (), '', False, 'from google.cloud.forseti.common.gcp_type.project import Project\n'), ((94, 27, 95, 55), 'tests.unittest_utils.get_datafile_path', 'get_datafile_path', ({(95, 12, 95, 20): '__file__', (95, 22, 95, 54): '"""log_sink_test_valid_rules.yaml"""'}, {}), "(__file__, 'log_sink_test_valid_rules.yaml')", False, 'from tests.unittest_utils import get_datafile_path\n'), ((126, 27, 127, 57), 'tests.unittest_utils.get_datafile_path', 'get_datafile_path', ({(127, 12, 127, 20): '__file__', (127, 22, 127, 56): '"""log_sink_test_invalid_rules.yaml"""'}, {}), "(__file__, 'log_sink_test_invalid_rules.yaml')", False, 'from tests.unittest_utils import get_datafile_path\n'), ((139, 12, 148, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((149, 12, 159, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((179, 12, 188, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((201, 12, 210, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((223, 12, 232, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((233, 12, 243, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((286, 12, 295, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((296, 12, 305, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((334, 12, 343, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((371, 12, 380, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), 
'', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((410, 12, 419, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((420, 12, 429, 13), 'google.cloud.forseti.common.gcp_type.log_sink.LogSink', 'LogSink', (), '', False, 'from google.cloud.forseti.common.gcp_type.log_sink import LogSink\n'), ((249, 12, 263, 13), 'google.cloud.forseti.scanner.audit.log_sink_rules_engine.Rule.RuleViolation', 'lsre.Rule.RuleViolation', (), '', True, 'from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre\n'), ((264, 12, 276, 13), 'google.cloud.forseti.scanner.audit.log_sink_rules_engine.Rule.RuleViolation', 'lsre.Rule.RuleViolation', (), '', True, 'from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre\n'), ((311, 12, 324, 13), 'google.cloud.forseti.scanner.audit.log_sink_rules_engine.Rule.RuleViolation', 'lsre.Rule.RuleViolation', (), '', True, 'from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre\n'), ((349, 12, 361, 43), 'google.cloud.forseti.scanner.audit.log_sink_rules_engine.Rule.RuleViolation', 'lsre.Rule.RuleViolation', (), '', True, 'from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre\n'), ((386, 12, 399, 43), 'google.cloud.forseti.scanner.audit.log_sink_rules_engine.Rule.RuleViolation', 'lsre.Rule.RuleViolation', (), '', True, 'from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre\n'), ((434, 12, 447, 13), 'google.cloud.forseti.scanner.audit.log_sink_rules_engine.Rule.RuleViolation', 'lsre.Rule.RuleViolation', (), '', True, 'from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre\n')] |
agupta54/ulca | backend/api/ulca-ums-service/user-management/utilities/orgUtils.py | c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | import uuid
from config import USR_ORG_MONGO_COLLECTION, USR_MONGO_COLLECTION
import db
from models.response import post_error
import logging
log = logging.getLogger('file')
class OrgUtils:
def __init__(self):
pass
#orgId generation
@staticmethod
def generate_org_id():
"""UUID generation for org registeration"""
return(uuid.uuid4().hex)
@staticmethod
def validate_org(org_code):
"""Validating Org
Org should be registered and active on Anuvaad system.
"""
try:
#connecting to mongo instance/collection
collections = db.get_db()[USR_ORG_MONGO_COLLECTION]
#searching for active org record
result = collections.find({"code": org_code}, {"_id": 0, "active": 1})
if result.count() == 0:
return post_error("Invalid Organization", "No such registered organization with the given Org Id", None)
for value in result:
if value["active"] == False:
return post_error("Invalid Organization", "Organization is currently inactive", None)
except Exception as e:
log.exception(f"Db connection exception : {e}")
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
@staticmethod
def validate_org_upsert(i,org):
"""Org validation on upsert
deactivation of org allowed only once all the users in the corresponding org is inactive.
"""
if "code" not in org or not org["code"]:
return post_error("Data Missing", "code not found", None)
if "active" not in org:
return post_error("Data Missing", "active not found", None)
code = str(org["code"]).upper()
active = org["active"]
if not isinstance(active,bool):
return post_error("Invalid format", "active should be bool", None), 400
if active == False:
try:
#connecting to mongo instance/collection
collections = db.get_db()[USR_MONGO_COLLECTION]
#searching for active users in the org
result = collections.find({"orgID": code,"is_active":True})
if result.count()!=0:
log.info("Deactivation request for org failed, {} active users with the orgID".format(str(result.count())))
return post_error("Deactivation Failed","There exist active users in {} hence this action cannot be performed".format(code),None)
except Exception as e:
log.exception(f"Db connection exception : {e}")
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None) | [((7, 6, 7, 31), 'logging.getLogger', 'logging.getLogger', ({(7, 24, 7, 30): '"""file"""'}, {}), "('file')", False, 'import logging\n'), ((18, 15, 18, 27), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((51, 19, 51, 69), 'models.response.post_error', 'post_error', ({(51, 30, 51, 44): '"""Data Missing"""', (51, 46, 51, 62): '"""code not found"""', (51, 64, 51, 68): 'None'}, {}), "('Data Missing', 'code not found', None)", False, 'from models.response import post_error\n'), ((53, 19, 53, 71), 'models.response.post_error', 'post_error', ({(53, 30, 53, 44): '"""Data Missing"""', (53, 46, 53, 64): '"""active not found"""', (53, 66, 53, 70): 'None'}, {}), "('Data Missing', 'active not found', None)", False, 'from models.response import post_error\n'), ((30, 26, 30, 37), 'db.get_db', 'db.get_db', ({}, {}), '()', False, 'import db\n'), ((34, 23, 34, 120), 'models.response.post_error', 'post_error', ({(34, 34, 34, 56): '"""Invalid Organization"""', (34, 58, 34, 113): '"""No such registered organization with the given Org Id"""', (34, 115, 34, 119): 'None'}, {}), "('Invalid Organization',\n 'No such registered organization with the given Org Id', None)", False, 'from models.response import post_error\n'), ((57, 19, 57, 78), 'models.response.post_error', 'post_error', ({(57, 30, 57, 46): '"""Invalid format"""', (57, 48, 57, 71): '"""active should be bool"""', (57, 73, 57, 77): 'None'}, {}), "('Invalid format', 'active should be bool', None)", False, 'from models.response import post_error\n'), ((37, 27, 37, 105), 'models.response.post_error', 'post_error', ({(37, 38, 37, 60): '"""Invalid Organization"""', (37, 62, 37, 98): '"""Organization is currently inactive"""', (37, 100, 37, 104): 'None'}, {}), "('Invalid Organization', 'Organization is currently inactive', None)", False, 'from models.response import post_error\n'), ((61, 30, 61, 41), 'db.get_db', 'db.get_db', ({}, {}), '()', False, 'import db\n')] |
AntonBiryukovUofC/diffvg | setup.py | e081098f52b82bfd0b7e91114d289d65ef969a60 | # Adapted from https://github.com/pybind/cmake_example/blob/master/setup.py
import os
import re
import sys
import platform
import subprocess
import importlib
from sysconfig import get_paths
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
from distutils.sysconfig import get_config_var
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir, build_with_cuda):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
self.build_with_cuda = build_with_cuda
class Build(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
super().run()
def build_extension(self, ext):
if isinstance(ext, CMakeExtension):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
info = get_paths()
include_path = info['include']
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_INCLUDE_PATH=' + include_path,
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir),
'-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
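                # sys.maxsize > 2**32 indicates a 64-bit Python interpreter, so request an x64 CMake generator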
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j8']
if ext.build_with_cuda:
cmake_args += ['-DDIFFVG_CUDA=1']
else:
cmake_args += ['-DDIFFVG_CUDA=0']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
env_build = env
env["CXX"] = "/usr/bin/g++-5"
env["CC"] = "/usr/bin/gcc-5"
env_build["CXX"] = "/usr/bin/g++-5"
env_build["CC"] = "/usr/bin/gcc-5"
env["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH']
env_build["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH']
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp, env=env_build)
else:
super().build_extension(ext)
torch_spec = importlib.util.find_spec("torch")
tf_spec = importlib.util.find_spec("tensorflow")
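# Build the PyTorch and/or TensorFlow bindings depending on which frameworks are importable,
# and enable CUDA support if either framework reports a usable GPU (see checks below).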
packages = []
build_with_cuda = False
if torch_spec is not None:
packages.append('pydiffvg')
import torch
if torch.cuda.is_available():
build_with_cuda = True
if tf_spec is not None and sys.platform != 'win32':
packages.append('pydiffvg_tensorflow')
if not build_with_cuda:
import tensorflow as tf
if tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None):
build_with_cuda = True
if len(packages) == 0:
print('Error: PyTorch or Tensorflow must be installed. For Windows platform only PyTorch is supported.')
exit()
# Override build_with_cuda with environment variable
if 'DIFFVG_CUDA' in os.environ:
build_with_cuda = os.environ['DIFFVG_CUDA'] == '1'
setup(name='diffvg',
version='0.0.1',
install_requires=["svgpathtools"],
description='Differentiable Vector Graphics',
ext_modules=[CMakeExtension('diffvg', '', build_with_cuda)],
cmdclass=dict(build_ext=Build, install=install),
packages=packages,
zip_safe=False)
| [((81, 13, 81, 46), 'importlib.util.find_spec', 'importlib.util.find_spec', ({(81, 38, 81, 45): '"""torch"""'}, {}), "('torch')", False, 'import importlib\n'), ((82, 10, 82, 48), 'importlib.util.find_spec', 'importlib.util.find_spec', ({(82, 35, 82, 47): '"""tensorflow"""'}, {}), "('tensorflow')", False, 'import importlib\n'), ((89, 7, 89, 32), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((20, 8, 20, 50), 'setuptools.Extension.__init__', 'Extension.__init__', (), '', False, 'from setuptools import setup, Extension\n'), ((21, 25, 21, 51), 'os.path.abspath', 'os.path.abspath', ({(21, 41, 21, 50): 'sourcedir'}, {}), '(sourcedir)', False, 'import os\n'), ((96, 11, 96, 85), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', (), '', True, 'import tensorflow as tf\n'), ((28, 18, 28, 65), 'subprocess.check_output', 'subprocess.check_output', ({(28, 42, 28, 64): "['cmake', '--version']"}, {}), "(['cmake', '--version'])", False, 'import subprocess\n'), ((38, 19, 38, 30), 'sysconfig.get_paths', 'get_paths', ({}, {}), '()', False, 'from sysconfig import get_paths\n'), ((62, 18, 62, 35), 'os.environ.copy', 'os.environ.copy', ({}, {}), '()', False, 'import os\n'), ((75, 12, 75, 102), 'subprocess.check_call', 'subprocess.check_call', (), '', False, 'import subprocess\n'), ((76, 12, 76, 109), 'subprocess.check_call', 'subprocess.check_call', (), '', False, 'import subprocess\n'), ((47, 15, 47, 32), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((73, 19, 73, 50), 'os.path.exists', 'os.path.exists', ({(73, 34, 73, 49): 'self.build_temp'}, {}), '(self.build_temp)', False, 'import os\n'), ((74, 16, 74, 44), 'os.makedirs', 'os.makedirs', ({(74, 28, 74, 43): 'self.build_temp'}, {}), '(self.build_temp)', False, 'import os\n')] |
twinters007/robotpy-wpilib-utilities | robotpy_ext/common_drivers/navx/registerio.py | d2e18c16fc97a469e0621521e0fbed0093610d6e | # validated: 2017-02-19 DS c5e3a8a9b642 roborio/java/navx_frc/src/com/kauailabs/navx/frc/RegisterIO.java
#----------------------------------------------------------------------------
# Copyright (c) Kauai Labs 2015. All Rights Reserved.
#
# Created in support of Team 2465 (Kauaibots). Go Purple Wave!
#
# Open Source Software - may be modified and shared by FRC teams. Any
# modifications to this code must be accompanied by the \License.txt file
# in the root directory of the project
#----------------------------------------------------------------------------
from ._impl import AHRSProtocol, IMUProtocol, IMURegisters
from wpilib.timer import Timer
import logging
logger = logging.getLogger('navx')
__all__ = ['RegisterIO']
IO_TIMEOUT_SECONDS = 1.0
DELAY_OVERHEAD_SECONDS = 0.004
class _BoardId:
type = 0
hw_rev = 0
fw_ver_major = 0
fw_ver_minor = 0
fw_revision = 0
unique_id = [0]*12
class _BoardState:
op_status = 0
sensor_status = 0
cal_status = 0
selftest_status = 0
capability_flags = 0
update_rate_hz = 0
accel_fsr_g = 0
gyro_fsr_dps = 0
class RegisterIO:
def __init__(self, io_provider, update_rate_hz, notify_sink, board_capabilities):
"""
:param board_capabilities: must have the following callable attributes:
_isOmniMountSupported, _isBoardYawResetSupported,
_isDisplacementSupported
:param notify_sink: must have the following callable attributes:
_setYawPitchRoll, _setAHRSData, _setAHRSPosData,
_setRawData, _setBoardID, _setBoardState, _yawResetComplete
"""
self.io_provider = io_provider
self.update_rate_hz = update_rate_hz
self.board_capabilities = board_capabilities
self.notify_sink = notify_sink
self.raw_data_update = IMUProtocol.GyroUpdate()
self.ahrspos_update = AHRSProtocol.AHRSPosUpdate()
self.board_state = _BoardState()
self.board_id = _BoardId()
self.last_update_time = 0
self.byte_count = 0
self.update_count = 0
self.last_sensor_timestamp = 0
self._stop = False
def stop(self):
self._stop = True
def shutdown(self):
self.io_provider.shutdown()
def run(self):
logger.info("NavX io thread starting")
try:
self.io_provider.init()
# initial device configuration
self.setUpdateRateHz(self.update_rate_hz)
if not self.getConfiguration():
logger.warning("-- Did not get configuration data")
else:
logger.info("-- Board is %s (rev %s)",
IMURegisters.model_type(self.board_id.type),
self.board_id.hw_rev)
logger.info("-- Firmware %s.%s", self.board_id.fw_ver_major,
self.board_id.fw_ver_minor)
log_error = True
# Calculate delay to match configured update rate
# Note: some additional time is removed from the
# 1/update_rate value to ensure samples are not
# dropped, esp. at higher update rates.
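            # The update rate register holds a single byte, so mask to 8 bits before computing the period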
update_rate = 1.0/(self.update_rate_hz & 0xFF)
if update_rate > DELAY_OVERHEAD_SECONDS:
update_rate -= DELAY_OVERHEAD_SECONDS
logger.info("-- Update rate: %shz (%.4fs)",
self.update_rate_hz, update_rate)
# IO Loop
while not self._stop:
if self.board_state.update_rate_hz != self.update_rate_hz:
self.setUpdateRateHz(self.update_rate_hz)
try:
self.getCurrentData()
except IOError:
if log_error:
logger.exception("Error getting data")
log_error = False
else:
log_error = True
Timer.delay(update_rate)
except Exception:
logger.exception("Unhandled exception in NavX thread")
finally:
logger.info("NavX i/o thread exiting")
def getConfiguration(self):
success = False
retry_count = 0
while retry_count < 5 and not success:
try:
config = self.io_provider.read(IMURegisters.NAVX_REG_WHOAMI,
IMURegisters.NAVX_REG_SENSOR_STATUS_H+1)
except IOError as e:
logger.warning("Error reading configuration data, retrying (%s)", e)
success = False
Timer.delay(0.5)
else:
board_id = self.board_id
board_id.hw_rev = config[IMURegisters.NAVX_REG_HW_REV]
board_id.fw_ver_major = config[IMURegisters.NAVX_REG_FW_VER_MAJOR]
board_id.fw_ver_minor = config[IMURegisters.NAVX_REG_FW_VER_MINOR]
board_id.type = config[IMURegisters.NAVX_REG_WHOAMI]
self.notify_sink._setBoardID(board_id)
board_state = self.board_state
board_state.cal_status = config[IMURegisters.NAVX_REG_CAL_STATUS]
board_state.op_status = config[IMURegisters.NAVX_REG_OP_STATUS]
board_state.selftest_status = config[IMURegisters.NAVX_REG_SELFTEST_STATUS]
board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_SENSOR_STATUS_L)
board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L)
board_state.accel_fsr_g = config[IMURegisters.NAVX_REG_ACCEL_FSR_G]
board_state.update_rate_hz = config[IMURegisters.NAVX_REG_UPDATE_RATE_HZ]
board_state.capability_flags = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L)
self.notify_sink._setBoardState(board_state)
success = True
retry_count += 1
return success
def getCurrentData(self):
first_address = IMURegisters.NAVX_REG_UPDATE_RATE_HZ
displacement_registers = self.board_capabilities._isDisplacementSupported()
# If firmware supports displacement data, acquire it - otherwise implement
# similar (but potentially less accurate) calculations on this processor.
if displacement_registers:
read_count = IMURegisters.NAVX_REG_LAST + 1 - first_address
else:
read_count = IMURegisters.NAVX_REG_QUAT_OFFSET_Z_H + 1 - first_address
curr_data = self.io_provider.read(first_address, read_count)
sensor_timestamp = AHRSProtocol.decodeBinaryUint32(curr_data, IMURegisters.NAVX_REG_TIMESTAMP_L_L-first_address)
if sensor_timestamp == self.last_sensor_timestamp:
return
self.last_sensor_timestamp = sensor_timestamp
ahrspos_update = self.ahrspos_update
ahrspos_update.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS - first_address]
ahrspos_update.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS - first_address]
ahrspos_update.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS]
ahrspos_update.sensor_status = curr_data[IMURegisters.NAVX_REG_SENSOR_STATUS_L - first_address]
ahrspos_update.yaw = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_YAW_L-first_address)
ahrspos_update.pitch = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_PITCH_L-first_address)
ahrspos_update.roll = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_ROLL_L-first_address)
ahrspos_update.compass_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_HEADING_L-first_address)
        ahrspos_update.mpu_temp = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_MPU_TEMP_C_L - first_address)
ahrspos_update.world_linear_accel_x = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_X_L-first_address)
ahrspos_update.world_linear_accel_y = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Y_L-first_address)
ahrspos_update.world_linear_accel_z = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Z_L-first_address)
ahrspos_update.altitude = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_ALTITUDE_D_L - first_address)
ahrspos_update.baro_pressure = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_PRESSURE_DL - first_address)
ahrspos_update.fused_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_FUSED_HEADING_L-first_address)
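        # Quaternion components arrive as signed 16-bit integers scaled so that 32768 represents 1.0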
ahrspos_update.quaternionW = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_W_L-first_address)/ 32768.0
ahrspos_update.quaternionX = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_X_L-first_address)/ 32768.0
ahrspos_update.quaternionY = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Y_L-first_address)/ 32768.0
ahrspos_update.quaternionZ = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Z_L-first_address)/ 32768.0
if displacement_registers:
ahrspos_update.vel_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_X_I_L-first_address)
ahrspos_update.vel_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Y_I_L-first_address)
ahrspos_update.vel_z = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Z_I_L-first_address)
ahrspos_update.disp_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_X_I_L-first_address)
ahrspos_update.disp_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_Y_I_L-first_address)
ahrspos_update.disp_z = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_Z_I_L-first_address)
self.notify_sink._setAHRSPosData(ahrspos_update, sensor_timestamp)
else:
self.notify_sink._setAHRSData(ahrspos_update, sensor_timestamp)
board_state = self.board_state
board_state.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS-first_address]
board_state.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS-first_address]
board_state.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS-first_address]
board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_SENSOR_STATUS_L-first_address)
board_state.update_rate_hz = curr_data[IMURegisters.NAVX_REG_UPDATE_RATE_HZ-first_address]
board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L)
board_state.accel_fsr_g = curr_data[IMURegisters.NAVX_REG_ACCEL_FSR_G]
board_state.capability_flags= AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L-first_address)
self.notify_sink._setBoardState(board_state)
raw_data_update = self.raw_data_update
raw_data_update.raw_gyro_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_X_L-first_address)
raw_data_update.raw_gyro_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Y_L-first_address)
raw_data_update.raw_gyro_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Z_L-first_address)
raw_data_update.raw_accel_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_X_L-first_address)
raw_data_update.raw_accel_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Y_L-first_address)
raw_data_update.raw_accel_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Z_L-first_address)
raw_data_update.cal_mag_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_X_L-first_address)
raw_data_update.cal_mag_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Y_L-first_address)
raw_data_update.cal_mag_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Z_L-first_address)
raw_data_update.mpu_temp_c = ahrspos_update.mpu_temp
self.notify_sink._setRawData(raw_data_update, sensor_timestamp)
self.last_update_time = Timer.getFPGATimestamp()
self.byte_count += len(curr_data)
self.update_count += 1
def isConnected(self):
time_since_last_update = Timer.getFPGATimestamp() - self.last_update_time
return time_since_last_update <= IO_TIMEOUT_SECONDS
def getByteCount(self):
return self.byte_count
def getUpdateCount(self):
return self.update_count
def setUpdateRateHz(self, update_rate_hz):
self.io_provider.write(IMURegisters.NAVX_REG_UPDATE_RATE_HZ, update_rate_hz)
def zeroYaw(self):
self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL,
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_YAW )
self.notify_sink._yawResetComplete()
def zeroDisplacement(self):
self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL,
(AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_X |
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Y |
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Z ) )
| [((17, 9, 17, 34), 'logging.getLogger', 'logging.getLogger', ({(17, 27, 17, 33): '"""navx"""'}, {}), "('navx')", False, 'import logging\n'), ((240, 32, 240, 56), 'wpilib.timer.Timer.getFPGATimestamp', 'Timer.getFPGATimestamp', ({}, {}), '()', False, 'from wpilib.timer import Timer\n'), ((245, 33, 245, 57), 'wpilib.timer.Timer.getFPGATimestamp', 'Timer.getFPGATimestamp', ({}, {}), '()', False, 'from wpilib.timer import Timer\n'), ((123, 16, 123, 40), 'wpilib.timer.Timer.delay', 'Timer.delay', ({(123, 28, 123, 39): 'update_rate'}, {}), '(update_rate)', False, 'from wpilib.timer import Timer\n'), ((140, 16, 140, 32), 'wpilib.timer.Timer.delay', 'Timer.delay', ({(140, 28, 140, 31): '(0.5)'}, {}), '(0.5)', False, 'from wpilib.timer import Timer\n')] |
wvdv2002/RigolWFM | RigolWFM/channel.py | 849a1130c9194f052eaf5582dfa67e7a5708a3a3 | #pylint: disable=invalid-name
#pylint: disable=too-many-instance-attributes
#pylint: disable=too-many-return-statements
#pylint: disable=too-many-statements
"""
Class structure and methods for an oscilloscope channel.
The idea is to collect all the relevant information from all the Rigol
scope waveforms into a single structure that can be handled in a uniform
and consistent manner.
Specifically this lets one just use
channel.times : numpy array of signal times
channel.volts : numpy array of signal voltages
or the stringification method to describe a channel
print(channel)
"""
from enum import Enum
import numpy as np
class UnitEnum(Enum):
"""Enumerated units for scopes without them."""
w = 0
a = 1
v = 2
u = 3
def best_scale(number):
"""Scale and units for a number with proper prefix."""
absnr = abs(number)
if absnr == 0:
return 1, ' '
if absnr < 0.99999999e-9:
return 1e12, 'p'
if absnr < 0.99999999e-6:
return 1e9, 'n'
if absnr < 0.99999999e-3:
return 1e6, 'µ'
if absnr < 0.99999999:
return 1e3, 'm'
if absnr < 0.99999999e3:
return 1, ' '
if absnr < 0.99999999e6:
return 1e-3, 'k'
if absnr < 0.999999991e9:
return 1e-6, 'M'
return 1e-9, 'G'
def engineering_string(number, n_digits):
"""Format number with proper prefix."""
scale, prefix = best_scale(number)
fformat = "%%.%df %%s" % n_digits
s = fformat % (number * scale, prefix)
return s
def _channel_bytes(channel_number, w):
"""
    Return the right series of bytes for a channel of a 1000Z-series scope.
    Waveform points are stored interleaved in memory when two or more
    channels are saved. This unweaves them.
Args:
channel_number: the number of enabled channels before this one
w: original waveform object
Returns
byte array for specified channel
"""
offset = 0
if w.header.stride == 2: # byte pattern CHx CHy
# use odd bytes when this is the second enabled channel
if any([w.header.ch[i].enabled for i in range(channel_number-1)]):
offset = 1
elif w.header.stride == 4: # byte pattern CH4 CH3 CH2 CH1
offset = 4 - channel_number
data = np.frombuffer(w.data.raw, dtype=np.uint8)
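    # Take every stride-th byte starting at this channel's offset to de-interleave its samples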
raw_bytes = data[offset::w.header.stride]
return raw_bytes
class Channel():
"""Base class for a single channel."""
def __init__(self, w, channel_number, scope, selected='1234'):
"""
Initialize a Channel Object.
Args:
w: Wfm object
channel_number: 1, 2, 3, or 4
scope: string describing scope
selected: string with channels chosen by user
Returns:
Channel object
"""
self.channel_number = channel_number
self.name = "CH %d" % channel_number
self.waveform = w
self.seconds_per_point = w.header.seconds_per_point
self.firmware = 'unknown'
self.unit = UnitEnum.v
self.points = 0
self.raw = None
self.volts = None
self.times = None
self.coupling = 'unknown'
self.roll_stop = 0
self.time_offset = 0
self.time_scale = 1
self.enabled = False
self.enabled_and_selected = False
self.volt_scale = 1
self.volt_offset = 0
self.y_scale = 1
self.y_offset = 0
self.volt_per_division = 1
self.probe_value = 1
self.inverted = False
# determine if this channel is one of those chosen by user
chosen = selected.find(str(channel_number)) != -1
if channel_number <= len(w.header.ch):
channel = w.header.ch[channel_number-1]
self.enabled = channel.enabled
self.enabled_and_selected = channel.enabled and chosen
self.volt_scale = channel.volt_scale
self.volt_offset = channel.volt_offset
self.y_scale = channel.volt_scale
self.y_offset = channel.volt_offset
self.volt_per_division = channel.volt_per_division
self.probe_value = channel.probe_value
self.unit = channel.unit
self.inverted = channel.inverted
if scope == 'wfm1000c':
self.ds1000c(w, channel_number)
elif scope == 'wfm1000d':
self.ds1000d(w, channel_number)
elif scope == 'wfm1000e':
self.ds1000e(w, channel_number)
elif scope == 'wfm1000z':
self.ds1000z(w, channel_number)
elif scope == 'wfm2000':
self.ds2000(w, channel_number)
elif scope == 'wfm4000':
self.ds4000(w, channel_number)
elif scope == 'wfm6000':
self.ds6000(w, channel_number)
def __str__(self):
"""Describe this channel."""
s = " Channel %d:\n" % self.channel_number
s += " Coupling = %8s\n" % self.coupling.rjust(7, ' ')
s += " Scale = %10sV/div\n" % engineering_string(self.volt_per_division, 2)
s += " Offset = %10sV\n" % engineering_string(self.volt_offset, 2)
s += " Probe = %7gX\n" % self.probe_value
s += " Inverted = %8s\n\n" % self.inverted
s += " Time Base = %10ss/div\n" % engineering_string(self.time_scale, 3)
s += " Offset = %10ss\n" % engineering_string(self.time_offset, 3)
s += " Delta = %10ss/point\n" % engineering_string(self.seconds_per_point, 3)
s += " Points = %8d\n\n" % self.points
if self.enabled_and_selected:
s += " Count = [%9d,%9d,%9d ... %9d,%9d]\n" % (
1, 2, 3, self.points-1, self.points)
s += " Raw = [%9d,%9d,%9d ... %9d,%9d]\n" % (
self.raw[0], self.raw[1], self.raw[2], self.raw[-2], self.raw[-1])
t = [engineering_string(self.times[i], 3) +
"s" for i in [0, 1, 2, -2, -1]]
s += " Times = [%9s,%9s,%9s ... %9s,%9s]\n" % (
t[0], t[1], t[2], t[-2], t[-1])
v = [engineering_string(self.volts[i], 2) +
"V" for i in [0, 1, 2, -2, -1]]
s += " Volts = [%9s,%9s,%9s ... %9s,%9s]\n" % (
v[0], v[1], v[2], v[-2], v[-1])
return s
def calc_times_and_volts(self):
"""Calculate the times and voltages for this channel."""
if self.enabled_and_selected:
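            # Raw samples are unsigned byte codes with 127 treated as mid-scale; convert to volts and remove the offset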
self.volts = self.y_scale * (127.0 - self.raw) - self.y_offset
h = self.points * self.seconds_per_point / 2
self.times = np.linspace(-h, h, self.points) + self.time_offset
def ds1000c(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000d(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000e(self, w, channel_number):
"""Interpret waveform data for 1000D and 1000E series scopes."""
self.roll_stop = w.header.roll_stop
if channel_number == 1:
self.time_offset = w.header.ch1_time_offset
self.time_scale = w.header.ch1_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
elif channel_number == 2:
self.time_offset = w.header.ch2_time_offset
self.time_scale = w.header.ch2_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000z(self, w, channel_number):
"""Interpret waveform for the Rigol DS1000Z series."""
self.time_scale = w.header.time_scale
self.time_offset = w.header.time_offset
self.points = w.header.points
self.stride = w.header.stride
self.firmware = w.preheader.firmware_version
self.probe = w.header.ch[channel_number-1].probe_value
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = w.header.ch[channel_number-1].y_scale
self.y_offset = w.header.ch[channel_number-1].y_offset
if self.enabled_and_selected:
self.raw = _channel_bytes(channel_number, w)
self.points = len(self.raw)
self.calc_times_and_volts()
def ds2000(self, w, channel_number):
"""Interpret waveform for the Rigol DS2000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.storage_depth
self.firmware = w.header.firmware_version
self.unit = UnitEnum(w.header.ch[channel_number-1].unit_actual)
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds4000(self, w, channel_number):
"""Interpret waveform for the Rigol DS4000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds6000(self, w, channel_number):
"""Interpret waveform for the Rigol DS6000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.unit = w.header.ch[channel_number-1].unit
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.array(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.array(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.array(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.array(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
| [((86, 11, 86, 52), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((195, 25, 195, 56), 'numpy.linspace', 'np.linspace', ({(195, 37, 195, 39): '(-h)', (195, 41, 195, 42): 'h', (195, 44, 195, 55): 'self.points'}, {}), '(-h, h, self.points)', True, 'import numpy as np\n'), ((205, 27, 205, 68), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((210, 27, 210, 68), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((222, 27, 222, 68), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((227, 27, 227, 68), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((241, 27, 241, 68), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((283, 27, 283, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((286, 27, 286, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((289, 27, 289, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((292, 27, 292, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((309, 27, 309, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((312, 27, 312, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((315, 27, 315, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((318, 27, 318, 72), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n'), ((334, 27, 334, 67), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((337, 27, 337, 67), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((340, 27, 340, 67), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((343, 27, 343, 67), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((248, 27, 248, 68), 'numpy.frombuffer', 'np.frombuffer', (), '', True, 'import numpy as np\n')] |
esf-bt2020/mmdetection | configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py | abc5fe060e0fcb716f845c85441be3741b22d3cf | _base_ = '../faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_stages=4,
#frozen_stages=4
),
roi_head=dict(
bbox_head=dict(
num_classes=3
)
)
)
dataset_type = 'COCODataset'
classes = ('luchs', 'rotfuchs', 'wolf')
data = dict(
train=dict(
img_prefix='raubtierv2a/train/',
classes=classes,
ann_file='raubtierv2a/train/_annotations.coco.json'),
val=dict(
img_prefix='raubtierv2a/valid/',
classes=classes,
ann_file='raubtierv2a/valid/_annotations.coco.json'),
test=dict(
img_prefix='raubtierv2a/test/',
classes=classes,
ann_file='raubtierv2a/test/_annotations.coco.json'))
#optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) #original (8x2=16)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) #(4x2=8) 4 GPUs
#optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) #(1x2=2)
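# Note: the learning rate appears to follow the linear scaling rule (lr = 0.02 * total_batch_size / 16),
# matching the GPU x samples-per-GPU counts noted in the comments above.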
total_epochs=24
evaluation = dict(classwise=True, interval=1, metric='bbox')
work_dir = '/media/storage1/projects/WilLiCam/checkpoint_workdir/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu'
#http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth
load_from = 'checkpoints/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth'
| [] |
wbaweto/QConf | driver/python/setup.py | 977a53d601eab2055fd8fb344b92f4026d178ad5 | from distutils.core import setup, Extension
setup(name = 'qconf_py', version = '1.2.2', ext_modules = [Extension('qconf_py', ['lib/python_qconf.cc'],
include_dirs=['/usr/local/include/qconf'],
extra_objects=['/usr/local/qconf/lib/libqconf.a']
)])
| [((2, 59, 5, 6), 'distutils.core.Extension', 'Extension', (), '', False, 'from distutils.core import setup, Extension\n')] |
Lockdef/kyopro-code | abc153/d.py | 2d943a87987af05122c556e173e5108a0c1c77c8 | h = int(input())
i = 1
a = 1
b = 1
c = 1
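# Find the smallest power of two strictly greater than h; when the loop ends, a == 2**(i-1) > h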
while h >= a:
a = 2 ** i
i += 1
s = 0
t = True
for j in range(1, i-1):
c += 2 ** j
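# c = 1 + 2 + ... + 2**(i-2) = 2**(i-1) - 1, the node count of a complete binary tree of that
# depth (presumably the minimum number of attacks asked for in AtCoder ABC 153 problem D)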
print(c)
| [] |
Natureshadow/OpenGoPro | demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py | 05110123cfbf6584288b813f2d4896d3a091480e | # log_battery.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:45 PM
"""Example to continuously read the battery (with no Wifi connection)"""
import csv
import time
import logging
import argparse
import threading
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass
from typing import Optional, Tuple, Literal, List
from rich.console import Console
from open_gopro import GoPro
from open_gopro.constants import StatusId
from open_gopro.util import setup_logging, set_logging_level
logger = logging.getLogger(__name__)
console = Console() # rich consoler printer
BarsType = Literal[0, 1, 2, 3]
@dataclass
class Sample:
"""Simple class to store battery samples"""
index: int
percentage: int
bars: BarsType
def __post_init__(self) -> None:
self.time = datetime.now()
def __str__(self) -> str: # pylint: disable=missing-return-doc
return f"Index {self.index} @ time {self.time.strftime('%H:%M:%S')} --> bars: {self.bars}, percentage: {self.percentage}"
SAMPLE_INDEX = 0
SAMPLES: List[Sample] = []
def dump_results_as_csv(location: Path) -> None:
"""Write all of the samples to a csv file
Args:
location (Path): File to write to
"""
console.print(f"Dumping results as CSV to {location}")
with open(location, mode="w") as f:
w = csv.writer(f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
w.writerow(["index", "time", "percentage", "bars"])
initial_time = SAMPLES[0].time
for s in SAMPLES:
w.writerow([s.index, (s.time - initial_time).seconds, s.percentage, s.bars])
def process_battery_notifications(gopro: GoPro, initial_bars: BarsType, initial_percentage: int) -> None:
"""Separate thread to continuously check for and store battery notifications.
If the CLI parameter was set to poll, this isn't used.
Args:
gopro (GoPro): instance to get updates from
initial_bars (BarsType): Initial bars level when notifications were enabled
initial_percentage (int): Initial percentage when notifications were enabled
"""
last_percentage = initial_percentage
last_bars = initial_bars
while True:
# Block until we receive an update
notification = gopro.get_update()
# Update data points if they have changed
last_percentage = (
notification.data[StatusId.INT_BATT_PER]
if StatusId.INT_BATT_PER in notification.data
else last_percentage
)
last_bars = (
notification.data[StatusId.BATT_LEVEL] if StatusId.BATT_LEVEL in notification.data else last_bars
)
# Append and print sample
global SAMPLE_INDEX
SAMPLES.append(Sample(index=SAMPLE_INDEX, percentage=last_percentage, bars=last_bars))
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
def main() -> int:
"""Main program functionality
Returns:
int: program return code
"""
identifier, log_location, poll = parse_arguments()
global logger
logger = setup_logging(logger, log_location)
global SAMPLE_INDEX
gopro: Optional[GoPro] = None
return_code = 0
try:
with GoPro(identifier, enable_wifi=False) as gopro:
set_logging_level(logger, logging.ERROR)
# # Setup notifications if we are not polling
if poll is None:
console.print("Configuring battery notifications...")
# Enable notifications of the relevant battery statuses. Also store initial values.
bars = gopro.ble_status.batt_level.register_value_update().flatten
percentage = gopro.ble_status.int_batt_per.register_value_update().flatten
# Start a thread to handle asynchronous battery level notifications
threading.Thread(
target=process_battery_notifications, args=(gopro, bars, percentage), daemon=True
).start()
with console.status("[bold green]Receiving battery notifications until it dies..."):
# Sleep forever, allowing notification handler thread to deal with battery level notifications
while True:
time.sleep(1)
# Otherwise, poll
else:
with console.status("[bold green]Polling the battery until it dies..."):
while True:
SAMPLES.append(
Sample(
index=SAMPLE_INDEX,
percentage=gopro.ble_status.int_batt_per.get_value().flatten,
bars=gopro.ble_status.batt_level.get_value().flatten,
)
)
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
time.sleep(poll)
except Exception as e: # pylint: disable=broad-except
logger.error(repr(e))
return_code = 1
except KeyboardInterrupt:
logger.warning("Received keyboard interrupt. Shutting down...")
finally:
if len(SAMPLES) > 0:
csv_location = Path(log_location.parent) / "battery_results.csv"
dump_results_as_csv(csv_location)
if gopro is not None:
gopro.close()
console.print("Exiting...")
return return_code # pylint: disable=lost-exception
def parse_arguments() -> Tuple[str, Path, Optional[int]]:
"""Parse command line arguments
Returns:
        Tuple[str, Path, Optional[int]]: (identifier, path to save log, polling interval or None)
"""
parser = argparse.ArgumentParser(
description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications)."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \
If not used, first discovered GoPro will be connected to",
default=None,
)
parser.add_argument(
"-l",
"--log",
type=Path,
help="Location to store detailed log",
default="log_battery.log",
)
parser.add_argument(
"-p",
"--poll",
type=int,
help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications.",
default=None,
)
args = parser.parse_args()
return args.identifier, args.log, args.poll
if __name__ == "__main__":
main()
| [((22, 9, 22, 36), 'logging.getLogger', 'logging.getLogger', ({(22, 27, 22, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((23, 10, 23, 19), 'rich.console.Console', 'Console', ({}, {}), '()', False, 'from rich.console import Console\n'), ((102, 13, 102, 48), 'open_gopro.util.setup_logging', 'setup_logging', ({(102, 27, 102, 33): 'logger', (102, 35, 102, 47): 'log_location'}, {}), '(logger, log_location)', False, 'from open_gopro.util import setup_logging, set_logging_level\n'), ((162, 13, 164, 5), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((37, 20, 37, 34), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((55, 12, 55, 82), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n'), ((109, 13, 109, 49), 'open_gopro.GoPro', 'GoPro', (), '', False, 'from open_gopro import GoPro\n'), ((110, 12, 110, 52), 'open_gopro.util.set_logging_level', 'set_logging_level', ({(110, 30, 110, 36): 'logger', (110, 38, 110, 51): 'logging.ERROR'}, {}), '(logger, logging.ERROR)', False, 'from open_gopro.util import setup_logging, set_logging_level\n'), ((148, 27, 148, 52), 'pathlib.Path', 'Path', ({(148, 32, 148, 51): 'log_location.parent'}, {}), '(log_location.parent)', False, 'from pathlib import Path\n'), ((119, 16, 121, 17), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((125, 24, 125, 37), 'time.sleep', 'time.sleep', ({(125, 35, 125, 36): '(1)'}, {}), '(1)', False, 'import time\n'), ((139, 24, 139, 40), 'time.sleep', 'time.sleep', ({(139, 35, 139, 39): 'poll'}, {}), '(poll)', False, 'import time\n')] |
mcroydon/django-tumbleweed | tumbleweed/models.py | 3f1eab2bf12350a91ca38165efec0c221a1fe69a | # These are not the droids you are looking for. | [] |
wathsalav/xos | xos/hpc_observer/steps/sync_originserver.py | f6bcaa37a948ee41729236afe7fce0802e002404 | import os
import sys
import base64
from django.db.models import F, Q
from xos.config import Config
from observer.syncstep import SyncStep
from core.models import Service
from hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer
from util.logger import Logger, logging
# hpclibrary will be in steps/..
parentdir = os.path.join(os.path.dirname(__file__),"..")
sys.path.insert(0,parentdir)
from hpclib import HpcLibrary
logger = Logger(level=logging.INFO)
class SyncOriginServer(SyncStep, HpcLibrary):
provides=[OriginServer]
requested_interval=0
def __init__(self, **args):
SyncStep.__init__(self, **args)
HpcLibrary.__init__(self)
def fetch_pending(self, deleted):
#self.consistency_check()
return SyncStep.fetch_pending(self, deleted)
def consistency_check(self):
# set to true if something changed
result=False
# sanity check to make sure our PS objects have CMI objects behind them
all_ors_ids = [x["origin_server_id"] for x in self.client.onev.ListAll("OriginServer")]
for ors in OriginServer.objects.all():
if (ors.origin_server_id is not None) and (ors.origin_server_id not in all_ors_ids):
# we have an origin server ID, but it doesn't exist in the CMI
# something went wrong
# start over
logger.info("origin server %s was not found on CMI" % ors.origin_server_id)
ors.origin_server_id=None
ors.save()
result = True
return result
def sync_record(self, ors):
logger.info("sync'ing origin server %s" % str(ors))
if (not ors.contentProvider) or (not ors.contentProvider.content_provider_id):
return
cpid = ors.contentProvider.content_provider_id
        # validation requires that the URL start with http://
url = ors.url
if not url.startswith("http://"):
url = "http://" + url
ors_dict = {"authenticated_content": ors.authenticated, "zone_redirects": ors.redirects, "content_provider_id": cpid, "url": url, "service_type": "HyperCache", "caching_type": "Optimistic", "description": ors.description}
#print os_dict
if not ors.origin_server_id:
id = self.client.onev.Create("OriginServer", ors_dict)
ors.origin_server_id = id
else:
self.client.onev.Update("OriginServer", ors.origin_server_id, ors_dict)
# ... something breaks (analytics) if the URL starts with http://, so we
# change it in cob after we added it via onev.
url = url[7:]
self.client.cob.UpdateContent(ors.origin_server_id, {"url": url})
ors.silent = True
ors.save()
def delete_record(self, m):
if m.origin_server_id is not None:
self.client.onev.Delete("OriginServer", m.origin_server_id)
| [((14, 0, 14, 28), 'sys.path.insert', 'sys.path.insert', ({(14, 16, 14, 17): '(0)', (14, 18, 14, 27): 'parentdir'}, {}), '(0, parentdir)', False, 'import sys\n'), ((18, 9, 18, 35), 'util.logger.Logger', 'Logger', (), '', False, 'from util.logger import Logger, logging\n'), ((13, 25, 13, 50), 'os.path.dirname', 'os.path.dirname', ({(13, 41, 13, 49): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((25, 8, 25, 39), 'observer.syncstep.SyncStep.__init__', 'SyncStep.__init__', ({(25, 26, 25, 30): 'self'}, {}), '(self, **args)', False, 'from observer.syncstep import SyncStep\n'), ((26, 8, 26, 33), 'hpclib.HpcLibrary.__init__', 'HpcLibrary.__init__', ({(26, 28, 26, 32): 'self'}, {}), '(self)', False, 'from hpclib import HpcLibrary\n'), ((31, 15, 31, 52), 'observer.syncstep.SyncStep.fetch_pending', 'SyncStep.fetch_pending', ({(31, 38, 31, 42): 'self', (31, 44, 31, 51): 'deleted'}, {}), '(self, deleted)', False, 'from observer.syncstep import SyncStep\n'), ((39, 19, 39, 45), 'hpc.models.OriginServer.objects.all', 'OriginServer.objects.all', ({}, {}), '()', False, 'from hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer\n')] |
aroxby/pixel-processor | main.py | 9cfe260a085ced0883ce8b0a35c28020f4aa8737 | #!/usr/bin/env python3
from PIL import Image
def tranform(r, g, b):
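    # Per-pixel channel shuffle: blue takes the old green value halved,
    # green takes the old blue value, and red is dimmed by half.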
tmp = b
b = g // 2
g = tmp
r = r // 2
return r, g, b
def main():
im = Image.open('blue-flames.jpg')
input_pixels = im.getdata()
output_pixels = tuple(tranform(*pixel) for pixel in input_pixels)
im.putdata(output_pixels)
im.save('green-flames.png')
if __name__ == '__main__':
main()
| [((14, 9, 14, 38), 'PIL.Image.open', 'Image.open', ({(14, 20, 14, 37): '"""blue-flames.jpg"""'}, {}), "('blue-flames.jpg')", False, 'from PIL import Image\n')] |
lesserwhirls/scipy-cwt | scipy/weave/examples/swig2_example.py | ee673656d879d9356892621e23ed0ced3d358621 | """Simple example to show how to use weave.inline on SWIG2 wrapped
objects. SWIG2 refers to SWIG versions >= 1.3.
To run this example you must build the trivial SWIG2 extension called
swig2_ext. To do this you need to do something like this::
$ swig -c++ -python -I. -o swig2_ext_wrap.cxx swig2_ext.i
$ g++ -Wall -O2 -I/usr/include/python2.3 -fPIC -I. -c \
-o swig2_ext_wrap.os swig2_ext_wrap.cxx
$ g++ -shared -o _swig2_ext.so swig2_ext_wrap.os \
-L/usr/lib/python2.3/config
The files swig2_ext.i and swig2_ext.h are included in the same
directory that contains this file.
Note that weave's SWIG2 support works fine whether SWIG_COBJECT_TYPES
are used or not.
Author: Prabhu Ramachandran
Copyright (c) 2004, Prabhu Ramachandran
License: BSD Style.
"""
# Import our SWIG2 wrapped library
import swig2_ext
import scipy.weave as weave
from scipy.weave import swig2_spec, converters
# SWIG2 support is not enabled by default. We do this by adding the
# swig2 converter to the default list of converters.
converters.default.insert(0, swig2_spec.swig2_converter())
def test():
"""Instantiate the SWIG wrapped object and then call its method
from C++ using weave.inline
"""
a = swig2_ext.A()
b = swig2_ext.foo() # This will be an APtr instance.
b.thisown = 1 # Prevent memory leaks.
code = """a->f();
b->f();
"""
weave.inline(code, ['a', 'b'], include_dirs=['.'],
headers=['"swig2_ext.h"'], verbose=1)
if __name__ == "__main__":
test()
| [((35, 29, 35, 57), 'scipy.weave.swig2_spec.swig2_converter', 'swig2_spec.swig2_converter', ({}, {}), '()', False, 'from scipy.weave import swig2_spec, converters\n'), ((42, 8, 42, 21), 'swig2_ext.A', 'swig2_ext.A', ({}, {}), '()', False, 'import swig2_ext\n'), ((43, 8, 43, 23), 'swig2_ext.foo', 'swig2_ext.foo', ({}, {}), '()', False, 'import swig2_ext\n'), ((48, 4, 49, 54), 'scipy.weave.inline', 'weave.inline', (), '', True, 'import scipy.weave as weave\n')] |
denghz/Probabilistic-Programming | src/simplify.py | fa505a75c4558e507fd3effd2737c63537bfe50d | from wolframclient.language.expression import WLSymbol
from nnDiff import *
def parseGlobalSymbol(s):
if isinstance(s, numbers.Number):
return s
if isinstance(s, WLSymbol):
if s.name == 'E':
return 'E'
else:
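            # wolframclient returns fully qualified names such as "Global`x";
            # drop the 7-character "Global`" context prefix to get the bare name.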
return s.name[7:]
def parse(exp):
symbol = parseGlobalSymbol(exp)
    if symbol is not None:
return [symbol]
else:
f = str(exp.head)
args = list(map(parse, exp.args))
res = []
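        # Lower Mathematica's Power into the prefix vocabulary: E^x becomes Exp,
        # negative exponents are wrapped in Inv, and integer exponents >= 2 are
        # expanded into nested Times applications (integer exponents assumed).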
if (f == "Power"):
res1 = []
p = args[1][0]
e = args[0]
if e == ['E']:
return ['Exp'] + args[1]
if p < 0:
res = ["Inv"]
p = -p
if p >= 2:
p = p - 2
res1 = ["Times"] + e + e
while p > 0:
p = p - 1
res1 = ["Times"] + res1 + e
return res + res1
else:
return res + e
else:
if len(args) == 1:
return [f] + args[0]
elif len(args) >= 2:
res = [f] + args[0] + args[1]
args = args[2:]
for arg in args:
res = [f] + res + arg
return res
def simplify(exp):
with WolframLanguageSession() as session:
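        # The prefix form uses an Inv head that the Wolfram kernel does not know,
        # so define it (Inv[x] == 1/x) before evaluating the expression.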
session.evaluate("Inv[zzz_] := 1/zzz")
f = wlexpr(str(Func(exp)))
getfreeVars = wlexpr("Reduce`FreeVariables")
freeVariables = session.evaluate(getfreeVars(f))
ass = wl.Element(wl.Alternatives(freeVariables), wl.Reals)
wmres = session.evaluate(wl.FullSimplify(f,ass))
print(wmres)
res = parse(wmres)
return res
if __name__ == "__main__":
exp = sys.argv[1:]
if exp == []:
exp = ["Sin", "x"]
res = map(str,simplify(exp))
print(' '.join(res), file=sys.stderr) | [] |
EdWard680/python-firetv | setup.py | 4c02f79a1c8ae60a489297178d010a31545a3b5d | from setuptools import setup
setup(
name='firetv',
version='1.0.7',
description='Communicate with an Amazon Fire TV device via ADB over a network.',
url='https://github.com/happyleavesaoc/python-firetv/',
license='MIT',
author='happyleaves',
author_email='[email protected]',
packages=['firetv'],
install_requires=['pycryptodome', 'rsa', 'adb-homeassistant', 'pure-python-adb-homeassistant'],
extras_require={
'firetv-server': ['Flask>=0.10.1', 'PyYAML>=3.12']
},
entry_points={
'console_scripts': [
'firetv-server = firetv.__main__:main'
]
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
]
)
| [((3, 0, 27, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n')] |
Mario-Kart-Felix/python-neo | neo/io/exampleio.py | 951c97cf9eb56f5489da88940de920329e0f4c1b | """
neo.io has been split into a 2-level API:
  * neo.io: this API gives neo objects
  * neo.rawio: this API gives the raw data as they are in the files
Developers are encouraged to use neo.rawio.
When this is done, the neo.io class comes automagically with
the following kind of code.
Author: sgarcia
"""
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.examplerawio import ExampleRawIO
class ExampleIO(ExampleRawIO, BaseFromRaw):
name = 'example IO'
description = "Fake IO"
    # This is an important choice when there are several channels.
    # 'split-all' : one AnalogSignal per channel
    # 'group-by-same-units' : one 2D AnalogSignal for each group of channels with the same units
_prefered_signal_group_mode = 'group-by-same-units'
def __init__(self, filename=''):
ExampleRawIO.__init__(self, filename=filename)
BaseFromRaw.__init__(self, filename)
| [((29, 8, 29, 54), 'neo.rawio.examplerawio.ExampleRawIO.__init__', 'ExampleRawIO.__init__', (), '', False, 'from neo.rawio.examplerawio import ExampleRawIO\n'), ((30, 8, 30, 44), 'neo.io.basefromrawio.BaseFromRaw.__init__', 'BaseFromRaw.__init__', ({(30, 29, 30, 33): 'self', (30, 35, 30, 43): 'filename'}, {}), '(self, filename)', False, 'from neo.io.basefromrawio import BaseFromRaw\n')] |
sap9433/Distributed-Multi-User-Scrapy-System-with-a-Web-UI | scrapyproject/migrations/0003_auto_20170209_1025.py | 0676f7599f288409d0faf7b6211c171ce8c46a7a | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapyproject', '0002_auto_20170208_1738'),
]
operations = [
migrations.AlterField(
model_name='project',
name='link_generator',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='project',
name='scraper_function',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='project',
name='settings',
field=models.TextField(blank=True),
),
]
| [((17, 18, 17, 46), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((22, 18, 22, 46), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((27, 18, 27, 46), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n')] |
cbsBiram/xarala__ssr | src/cart/forms.py | 863e1362c786daa752b942b796f7a015211d2f1b | from django import forms
from django.utils.translation import gettext_lazy as _
COURSE_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)]
class CartAddCourseForm(forms.Form):
quantity = forms.TypedChoiceField(
choices=COURSE_QUANTITY_CHOICES, coerce=int, label=_("Quantité")
)
override = forms.BooleanField(
required=False, initial=False, widget=forms.HiddenInput
)
| [((12, 15, 14, 5), 'django.forms.BooleanField', 'forms.BooleanField', (), '', False, 'from django import forms\n'), ((10, 59, 10, 73), 'django.utils.translation.gettext_lazy', '_', ({(10, 61, 10, 72): '"""Quantité"""'}, {}), "('Quantité')", True, 'from django.utils.translation import gettext_lazy as _\n')] |
sflippl/patches | patches/datasets/__init__.py | c19889e676e231af44669a01c61854e9e5791227 | """Datasets of latent predictability tasks.
"""
from .pilgrimm import *
| [] |