| code | code_sememe | token_type | code_dependency |
|---|---|---|---|
| stringlengths 75–104k | stringlengths 47–309k | stringlengths 215–214k | stringlengths 75–155k |
def _GetArgsDescription(self, args_type):
"""Get a simplified description of the args_type for a flow."""
args = {}
if args_type:
for type_descriptor in args_type.type_infos:
if not type_descriptor.hidden:
args[type_descriptor.name] = {
"description": type_descriptor.description,
"default": type_descriptor.default,
"type": "",
}
if type_descriptor.type:
args[type_descriptor.name]["type"] = type_descriptor.type.__name__
return args | def function[_GetArgsDescription, parameter[self, args_type]]:
constant[Get a simplified description of the args_type for a flow.]
variable[args] assign[=] dictionary[[], []]
if name[args_type] begin[:]
for taget[name[type_descriptor]] in starred[name[args_type].type_infos] begin[:]
if <ast.UnaryOp object at 0x7da1b1c18e20> begin[:]
call[name[args]][name[type_descriptor].name] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c19090>, <ast.Constant object at 0x7da1b1c1b910>, <ast.Constant object at 0x7da1b1c1ae30>], [<ast.Attribute object at 0x7da1b1c194b0>, <ast.Attribute object at 0x7da1b1c1b8b0>, <ast.Constant object at 0x7da1b1c18250>]]
if name[type_descriptor].type begin[:]
call[call[name[args]][name[type_descriptor].name]][constant[type]] assign[=] name[type_descriptor].type.__name__
return[name[args]] | keyword[def] identifier[_GetArgsDescription] ( identifier[self] , identifier[args_type] ):
literal[string]
identifier[args] ={}
keyword[if] identifier[args_type] :
keyword[for] identifier[type_descriptor] keyword[in] identifier[args_type] . identifier[type_infos] :
keyword[if] keyword[not] identifier[type_descriptor] . identifier[hidden] :
identifier[args] [ identifier[type_descriptor] . identifier[name] ]={
literal[string] : identifier[type_descriptor] . identifier[description] ,
literal[string] : identifier[type_descriptor] . identifier[default] ,
literal[string] : literal[string] ,
}
keyword[if] identifier[type_descriptor] . identifier[type] :
identifier[args] [ identifier[type_descriptor] . identifier[name] ][ literal[string] ]= identifier[type_descriptor] . identifier[type] . identifier[__name__]
keyword[return] identifier[args] | def _GetArgsDescription(self, args_type):
"""Get a simplified description of the args_type for a flow."""
args = {}
if args_type:
for type_descriptor in args_type.type_infos:
if not type_descriptor.hidden:
args[type_descriptor.name] = {'description': type_descriptor.description, 'default': type_descriptor.default, 'type': ''}
if type_descriptor.type:
args[type_descriptor.name]['type'] = type_descriptor.type.__name__ # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['type_descriptor']] # depends on [control=['if'], data=[]]
return args |
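A minimal sketch of what this method produces, using a standalone mirror and `SimpleNamespace` stand-ins for GRR's type_info descriptors. Only the attribute names (`type_infos`, `name`, `description`, `default`, `hidden`, `type`) come from the code above; the descriptor values are hypothetical.

```python
from types import SimpleNamespace

def get_args_description(args_type):
    """Standalone mirror of _GetArgsDescription, for illustration only."""
    args = {}
    if args_type:
        for td in args_type.type_infos:
            if not td.hidden:
                args[td.name] = {
                    "description": td.description,
                    "default": td.default,
                    "type": td.type.__name__ if td.type else "",
                }
    return args

# Hypothetical descriptors standing in for GRR's real type_info objects.
args_type = SimpleNamespace(type_infos=[
    SimpleNamespace(name="paths", description="Globs to fetch.",
                    default=[], hidden=False, type=list),
    SimpleNamespace(name="internal", description="", default=None,
                    hidden=True, type=None),
])

print(get_args_description(args_type))
# {'paths': {'description': 'Globs to fetch.', 'default': [], 'type': 'list'}}
```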
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:] | def function[is_private, parameter[prefix, base]]:
constant[prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
]
call[name[warnings].warn, parameter[constant[is_private is deprecated; it wasn't useful; examine DocTestFinder.find() lists instead], name[DeprecationWarning]]]
return[<ast.BoolOp object at 0x7da204344340>] | keyword[def] identifier[is_private] ( identifier[prefix] , identifier[base] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] ,
identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] )
keyword[return] identifier[base] [: literal[int] ]== literal[string] keyword[and] keyword[not] identifier[base] [: literal[int] ]== literal[string] == identifier[base] [- literal[int] :] | def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
"""
warnings.warn("is_private is deprecated; it wasn't useful; examine DocTestFinder.find() lists instead", DeprecationWarning, stacklevel=2)
return base[:1] == '_' and (not base[:2] == '__' == base[-2:]) |
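With the function above in scope (and `warnings` imported, as it is in the standard library's `doctest` module this comes from), the predicate behaves like this:

```python
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    print(is_private("mod", "_helper"))     # True:  single leading underscore
    print(is_private("mod", "__dunder__"))  # False: begins *and* ends with __
    print(is_private("mod", "public"))      # False: no leading underscore
```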
def format_errors(errors, indent=0, prefix='', suffix=''):
"""
string: "example"
"example"
dict:
"example":
-
"""
if is_single_item_iterable(errors):
errors = errors[0]
if isinstance(errors, SINGULAR_TYPES):
yield indent_message(repr(errors), indent, prefix=prefix, suffix=suffix)
elif isinstance(errors, collections.Mapping):
for key, value in errors.items():
assert isinstance(key, SINGULAR_TYPES), type(key)
if isinstance(value, SINGULAR_TYPES):
message = "{0}: {1}".format(repr(key), repr(value))
yield indent_message(message, indent, prefix=prefix, suffix=suffix)
else:
yield indent_message(repr(key), indent, prefix=prefix, suffix=':')
for message in format_errors(value, indent + 4, prefix='- '):
yield message
elif is_non_string_iterable(errors):
# for making the rhs of the numbers line up
extra_indent = int(math.ceil(math.log10(len(errors)))) + 2
for index, value in enumerate(errors):
list_prefix = "{0}. ".format(index)
messages = format_errors(
value,
indent=indent + extra_indent - len(list_prefix),
prefix=list_prefix,
)
for message in messages:
yield message
else:
assert False, "should not be possible" | def function[format_errors, parameter[errors, indent, prefix, suffix]]:
constant[
string: "example"
"example"
dict:
"example":
-
]
if call[name[is_single_item_iterable], parameter[name[errors]]] begin[:]
variable[errors] assign[=] call[name[errors]][constant[0]]
if call[name[isinstance], parameter[name[errors], name[SINGULAR_TYPES]]] begin[:]
<ast.Yield object at 0x7da18bc73130> | keyword[def] identifier[format_errors] ( identifier[errors] , identifier[indent] = literal[int] , identifier[prefix] = literal[string] , identifier[suffix] = literal[string] ):
literal[string]
keyword[if] identifier[is_single_item_iterable] ( identifier[errors] ):
identifier[errors] = identifier[errors] [ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[errors] , identifier[SINGULAR_TYPES] ):
keyword[yield] identifier[indent_message] ( identifier[repr] ( identifier[errors] ), identifier[indent] , identifier[prefix] = identifier[prefix] , identifier[suffix] = identifier[suffix] )
keyword[elif] identifier[isinstance] ( identifier[errors] , identifier[collections] . identifier[Mapping] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[errors] . identifier[items] ():
keyword[assert] identifier[isinstance] ( identifier[key] , identifier[SINGULAR_TYPES] ), identifier[type] ( identifier[key] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[SINGULAR_TYPES] ):
identifier[message] = literal[string] . identifier[format] ( identifier[repr] ( identifier[key] ), identifier[repr] ( identifier[value] ))
keyword[yield] identifier[indent_message] ( identifier[message] , identifier[indent] , identifier[prefix] = identifier[prefix] , identifier[suffix] = identifier[suffix] )
keyword[else] :
keyword[yield] identifier[indent_message] ( identifier[repr] ( identifier[key] ), identifier[indent] , identifier[prefix] = identifier[prefix] , identifier[suffix] = literal[string] )
keyword[for] identifier[message] keyword[in] identifier[format_errors] ( identifier[value] , identifier[indent] + literal[int] , identifier[prefix] = literal[string] ):
keyword[yield] identifier[message]
keyword[elif] identifier[is_non_string_iterable] ( identifier[errors] ):
identifier[extra_indent] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[math] . identifier[log10] ( identifier[len] ( identifier[errors] ))))+ literal[int]
keyword[for] identifier[index] , identifier[value] keyword[in] identifier[enumerate] ( identifier[errors] ):
identifier[list_prefix] = literal[string] . identifier[format] ( identifier[index] )
identifier[messages] = identifier[format_errors] (
identifier[value] ,
identifier[indent] = identifier[indent] + identifier[extra_indent] - identifier[len] ( identifier[list_prefix] ),
identifier[prefix] = identifier[list_prefix] ,
)
keyword[for] identifier[message] keyword[in] identifier[messages] :
keyword[yield] identifier[message]
keyword[else] :
keyword[assert] keyword[False] , literal[string] | def format_errors(errors, indent=0, prefix='', suffix=''):
"""
string: "example"
"example"
dict:
"example":
-
"""
if is_single_item_iterable(errors):
errors = errors[0] # depends on [control=['if'], data=[]]
if isinstance(errors, SINGULAR_TYPES):
yield indent_message(repr(errors), indent, prefix=prefix, suffix=suffix) # depends on [control=['if'], data=[]]
elif isinstance(errors, collections.Mapping):
for (key, value) in errors.items():
assert isinstance(key, SINGULAR_TYPES), type(key)
if isinstance(value, SINGULAR_TYPES):
message = '{0}: {1}'.format(repr(key), repr(value))
yield indent_message(message, indent, prefix=prefix, suffix=suffix) # depends on [control=['if'], data=[]]
else:
yield indent_message(repr(key), indent, prefix=prefix, suffix=':')
for message in format_errors(value, indent + 4, prefix='- '):
yield message # depends on [control=['for'], data=['message']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif is_non_string_iterable(errors):
# for making the rhs of the numbers line up
extra_indent = int(math.ceil(math.log10(len(errors)))) + 2
for (index, value) in enumerate(errors):
list_prefix = '{0}. '.format(index)
messages = format_errors(value, indent=indent + extra_indent - len(list_prefix), prefix=list_prefix)
for message in messages:
yield message # depends on [control=['for'], data=['message']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
assert False, 'should not be possible' |
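`format_errors` relies on helpers not shown in this row (`SINGULAR_TYPES`, `indent_message`, `is_single_item_iterable`, `is_non_string_iterable`); the definitions below are plausible stand-ins rather than the originals, and a small shim keeps the `collections.Mapping` reference working on Python 3.10+, where it moved to `collections.abc`. With those in place, a nested error structure formats like this:

```python
import collections
import collections.abc
import math

if not hasattr(collections, "Mapping"):            # Python 3.10+ shim so the
    collections.Mapping = collections.abc.Mapping  # function above still runs

SINGULAR_TYPES = (str, bytes, int, float, bool)    # stand-in definition

def indent_message(message, indent, prefix="", suffix=""):
    return "{0}{1}{2}{3}".format(" " * indent, prefix, message, suffix)

def is_single_item_iterable(value):
    return isinstance(value, (list, tuple)) and len(value) == 1

def is_non_string_iterable(value):
    return (isinstance(value, collections.abc.Iterable)
            and not isinstance(value, (str, bytes)))

errors = {"body": ["'name' is required", "'tags' must be a list"]}
print("\n".join(format_errors(errors)))
# 'body':
#     0. "'name' is required"
#     1. "'tags' must be a list"
```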
def init(self):
"""Init the connection to the Cassandra server."""
if not self.export_enable:
return None
# if username and/or password are not set the connection will try to connect with no auth
auth_provider = PlainTextAuthProvider(
username=self.username, password=self.password)
# Cluster
try:
cluster = Cluster([self.host],
port=int(self.port),
protocol_version=int(self.protocol_version),
auth_provider=auth_provider)
session = cluster.connect()
except Exception as e:
logger.critical("Cannot connect to Cassandra cluster '%s:%s' (%s)" % (self.host, self.port, e))
sys.exit(2)
# Keyspace
try:
session.set_keyspace(self.keyspace)
except InvalidRequest as e:
logger.info("Create keyspace {} on the Cassandra cluster".format(self.keyspace))
c = "CREATE KEYSPACE %s WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' }" % (self.keyspace, self.replication_factor)
session.execute(c)
session.set_keyspace(self.keyspace)
logger.info(
"Stats will be exported to Cassandra cluster {} ({}) in keyspace {}".format(
cluster.metadata.cluster_name, cluster.metadata.all_hosts(), self.keyspace))
# Table
try:
session.execute("CREATE TABLE %s (plugin text, time timeuuid, stat map<text,float>, PRIMARY KEY (plugin, time)) WITH CLUSTERING ORDER BY (time DESC)" % self.table)
except Exception:
logger.debug("Cassandra table %s already exist" % self.table)
return cluster, session | def function[init, parameter[self]]:
constant[Init the connection to the Cassandra server.]
if <ast.UnaryOp object at 0x7da18f09d660> begin[:]
return[constant[None]]
variable[auth_provider] assign[=] call[name[PlainTextAuthProvider], parameter[]]
<ast.Try object at 0x7da18f09f250>
<ast.Try object at 0x7da18f09df30>
call[name[logger].info, parameter[call[constant[Stats will be exported to Cassandra cluster {} ({}) in keyspace {}].format, parameter[name[cluster].metadata.cluster_name, call[name[cluster].metadata.all_hosts, parameter[]], name[self].keyspace]]]]
<ast.Try object at 0x7da18f09eda0>
return[tuple[[<ast.Name object at 0x7da18dc05120>, <ast.Name object at 0x7da18dc06b30>]]] | keyword[def] identifier[init] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[export_enable] :
keyword[return] keyword[None]
identifier[auth_provider] = identifier[PlainTextAuthProvider] (
identifier[username] = identifier[self] . identifier[username] , identifier[password] = identifier[self] . identifier[password] )
keyword[try] :
identifier[cluster] = identifier[Cluster] ([ identifier[self] . identifier[host] ],
identifier[port] = identifier[int] ( identifier[self] . identifier[port] ),
identifier[protocol_version] = identifier[int] ( identifier[self] . identifier[protocol_version] ),
identifier[auth_provider] = identifier[auth_provider] )
identifier[session] = identifier[cluster] . identifier[connect] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[critical] ( literal[string] %( identifier[self] . identifier[host] , identifier[self] . identifier[port] , identifier[e] ))
identifier[sys] . identifier[exit] ( literal[int] )
keyword[try] :
identifier[session] . identifier[set_keyspace] ( identifier[self] . identifier[keyspace] )
keyword[except] identifier[InvalidRequest] keyword[as] identifier[e] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[keyspace] ))
identifier[c] = literal[string] %( identifier[self] . identifier[keyspace] , identifier[self] . identifier[replication_factor] )
identifier[session] . identifier[execute] ( identifier[c] )
identifier[session] . identifier[set_keyspace] ( identifier[self] . identifier[keyspace] )
identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[cluster] . identifier[metadata] . identifier[cluster_name] , identifier[cluster] . identifier[metadata] . identifier[all_hosts] (), identifier[self] . identifier[keyspace] ))
keyword[try] :
identifier[session] . identifier[execute] ( literal[string] % identifier[self] . identifier[table] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[table] )
keyword[return] identifier[cluster] , identifier[session] | def init(self):
"""Init the connection to the Cassandra server."""
if not self.export_enable:
return None # depends on [control=['if'], data=[]]
# if username and/or password are not set the connection will try to connect with no auth
auth_provider = PlainTextAuthProvider(username=self.username, password=self.password)
# Cluster
try:
cluster = Cluster([self.host], port=int(self.port), protocol_version=int(self.protocol_version), auth_provider=auth_provider)
session = cluster.connect() # depends on [control=['try'], data=[]]
except Exception as e:
logger.critical("Cannot connect to Cassandra cluster '%s:%s' (%s)" % (self.host, self.port, e))
sys.exit(2) # depends on [control=['except'], data=['e']]
# Keyspace
try:
session.set_keyspace(self.keyspace) # depends on [control=['try'], data=[]]
except InvalidRequest as e:
logger.info('Create keyspace {} on the Cassandra cluster'.format(self.keyspace))
c = "CREATE KEYSPACE %s WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' }" % (self.keyspace, self.replication_factor)
session.execute(c)
session.set_keyspace(self.keyspace) # depends on [control=['except'], data=[]]
logger.info('Stats will be exported to Cassandra cluster {} ({}) in keyspace {}'.format(cluster.metadata.cluster_name, cluster.metadata.all_hosts(), self.keyspace))
# Table
try:
session.execute('CREATE TABLE %s (plugin text, time timeuuid, stat map<text,float>, PRIMARY KEY (plugin, time)) WITH CLUSTERING ORDER BY (time DESC)' % self.table) # depends on [control=['try'], data=[]]
except Exception:
        logger.debug('Cassandra table %s already exists' % self.table) # depends on [control=['except'], data=[]]
return (cluster, session) |
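A hypothetical follow-up showing how a stats row could be written into the table `init()` creates. `uuid_from_time` is a real helper from the `cassandra-driver` package; the `exporter` instance and the `stats` table name are assumptions, not taken from the code above.

```python
import time
from cassandra.util import uuid_from_time

# 'exporter' is assumed to be an already-configured instance of the
# class that owns init(); 'stats' stands in for its self.table value.
cluster, session = exporter.init()
session.execute(
    "INSERT INTO stats (plugin, time, stat) VALUES (%s, %s, %s)",
    ("cpu", uuid_from_time(time.time()), {"user": 12.5, "system": 3.1}),
)
cluster.shutdown()
```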
def Validate(self, value):
"""Validate the value.
Args:
value: Value is expected to be a dict-like object that a given RDFStruct
can be initialized from.
Raises:
TypeValueError: If the value is not a valid dict-like object that a given
RDFStruct can be initialized from.
Returns:
A valid instance of self.rdfclass or None.
"""
if value is None:
return None
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
r = self.rdfclass()
r.FromDict(value)
return r
except (AttributeError, TypeError, rdfvalue.InitializeError):
# AttributeError is raised if value contains items that don't
# belong to the given rdfstruct.
# TypeError will be raised if value is not a dict-like object.
raise TypeValueError("Value for arg %s should be an %s" %
(self.name, self.rdfclass.__name__))
return value | def function[Validate, parameter[self, value]]:
constant[Validate the value.
Args:
value: Value is expected to be a dict-like object that a given RDFStruct
can be initialized from.
Raises:
TypeValueError: If the value is not a valid dict-like object that a given
RDFStruct can be initialized from.
Returns:
A valid instance of self.rdfclass or None.
]
if compare[name[value] is constant[None]] begin[:]
return[constant[None]]
if <ast.UnaryOp object at 0x7da18fe92200> begin[:]
<ast.Try object at 0x7da18fe93fa0>
return[name[value]] | keyword[def] identifier[Validate] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[self] . identifier[rdfclass] ):
keyword[try] :
identifier[r] = identifier[self] . identifier[rdfclass] ()
identifier[r] . identifier[FromDict] ( identifier[value] )
keyword[return] identifier[r]
keyword[except] ( identifier[AttributeError] , identifier[TypeError] , identifier[rdfvalue] . identifier[InitializeError] ):
keyword[raise] identifier[TypeValueError] ( literal[string] %
( identifier[self] . identifier[name] , identifier[self] . identifier[rdfclass] . identifier[__name__] ))
keyword[return] identifier[value] | def Validate(self, value):
"""Validate the value.
Args:
value: Value is expected to be a dict-like object that a given RDFStruct
can be initialized from.
Raises:
TypeValueError: If the value is not a valid dict-like object that a given
RDFStruct can be initialized from.
Returns:
A valid instance of self.rdfclass or None.
"""
if value is None:
return None # depends on [control=['if'], data=[]]
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
r = self.rdfclass()
r.FromDict(value)
return r # depends on [control=['try'], data=[]]
except (AttributeError, TypeError, rdfvalue.InitializeError):
# AttributeError is raised if value contains items that don't
# belong to the given rdfstruct.
# TypeError will be raised if value is not a dict-like object.
raise TypeValueError('Value for arg %s should be an %s' % (self.name, self.rdfclass.__name__)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return value |
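A minimal sketch of the coercion contract `Validate` expects, with a hypothetical `FakeArgs` standing in for a GRR RDFStruct. Only the happy path is exercised, so the `rdfvalue.InitializeError` reference in the except clause is never evaluated.

```python
class FakeArgs:
    """Hypothetical RDFStruct stand-in: no-arg constructor plus FromDict()."""
    def __init__(self):
        self.data = {}

    def FromDict(self, value):
        self.data = dict(value)  # raises TypeError for non-dict-like input

class FakeTypeInfo:
    name = "args"
    rdfclass = FakeArgs
    Validate = Validate  # borrow the function defined above

info = FakeTypeInfo()
print(info.Validate({"paths": ["/tmp"]}).data)  # {'paths': ['/tmp']}
print(info.Validate(None))                      # None
```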
def add_library(self, name):
"""Add a library to the database
This method is for adding a library by name (eg: "BuiltIn")
rather than by a file.
"""
libdoc = LibraryDocumentation(name)
if len(libdoc.keywords) > 0:
# FIXME: figure out the path to the library file
collection_id = self.add_collection(None, libdoc.name, libdoc.type,
libdoc.doc, libdoc.version,
libdoc.scope, libdoc.named_args,
libdoc.doc_format)
self._load_keywords(collection_id, libdoc=libdoc) | def function[add_library, parameter[self, name]]:
constant[Add a library to the database
This method is for adding a library by name (eg: "BuiltIn")
rather than by a file.
]
variable[libdoc] assign[=] call[name[LibraryDocumentation], parameter[name[name]]]
if compare[call[name[len], parameter[name[libdoc].keywords]] greater[>] constant[0]] begin[:]
variable[collection_id] assign[=] call[name[self].add_collection, parameter[constant[None], name[libdoc].name, name[libdoc].type, name[libdoc].doc, name[libdoc].version, name[libdoc].scope, name[libdoc].named_args, name[libdoc].doc_format]]
call[name[self]._load_keywords, parameter[name[collection_id]]] | keyword[def] identifier[add_library] ( identifier[self] , identifier[name] ):
literal[string]
identifier[libdoc] = identifier[LibraryDocumentation] ( identifier[name] )
keyword[if] identifier[len] ( identifier[libdoc] . identifier[keywords] )> literal[int] :
identifier[collection_id] = identifier[self] . identifier[add_collection] ( keyword[None] , identifier[libdoc] . identifier[name] , identifier[libdoc] . identifier[type] ,
identifier[libdoc] . identifier[doc] , identifier[libdoc] . identifier[version] ,
identifier[libdoc] . identifier[scope] , identifier[libdoc] . identifier[named_args] ,
identifier[libdoc] . identifier[doc_format] )
identifier[self] . identifier[_load_keywords] ( identifier[collection_id] , identifier[libdoc] = identifier[libdoc] ) | def add_library(self, name):
"""Add a library to the database
This method is for adding a library by name (eg: "BuiltIn")
rather than by a file.
"""
libdoc = LibraryDocumentation(name)
if len(libdoc.keywords) > 0:
# FIXME: figure out the path to the library file
collection_id = self.add_collection(None, libdoc.name, libdoc.type, libdoc.doc, libdoc.version, libdoc.scope, libdoc.named_args, libdoc.doc_format)
self._load_keywords(collection_id, libdoc=libdoc) # depends on [control=['if'], data=[]] |
def windowed(a, windowsize, stepsize=None, dopad=False):
"""
Parameters
a - the input array to restructure into overlapping windows
windowsize - the size of each window of samples
stepsize - the number of samples to shift the window each step. If not
specified, this defaults to windowsize
    dopad - If false (default), leftover samples are returned separately.
If true, the input array is padded with zeros so that all
samples are used.
"""
if windowsize < 1:
raise ValueError('windowsize must be greater than or equal to one')
if stepsize is None:
stepsize = windowsize
if stepsize < 1:
raise ValueError('stepsize must be greater than or equal to one')
if not a.flags['C_CONTIGUOUS']:
a = a.copy()
if windowsize == 1 and stepsize == 1:
# A windowsize and stepsize of one mean that no windowing is necessary.
# Return the array unchanged.
return np.zeros((0,) + a.shape[1:], dtype=a.dtype), a
if windowsize == 1 and stepsize > 1:
return np.zeros(0, dtype=a.dtype), a[::stepsize]
# the original length of the input array
l = a.shape[0]
if dopad:
p = _wpad(l, windowsize, stepsize)
# pad the array with enough zeros so that there are no leftover samples
a = pad(a, p)
# no leftovers; an empty array
leftover = np.zeros((0,) + a.shape[1:], dtype=a.dtype)
else:
        # cut the array so that any leftover samples are returned separately
c, lc = _wcut(l, windowsize, stepsize)
leftover = a[lc:]
a = a[:c]
if 0 == a.shape[0]:
return leftover, np.zeros(a.shape, dtype=a.dtype)
n = 1 + (a.shape[0] - windowsize) // (stepsize)
s = a.strides[0]
newshape = (n, windowsize) + a.shape[1:]
newstrides = (stepsize * s, s) + a.strides[1:]
out = np.ndarray.__new__( \
np.ndarray,
strides=newstrides,
shape=newshape,
buffer=a,
dtype=a.dtype)
return leftover, out | def function[windowed, parameter[a, windowsize, stepsize, dopad]]:
constant[
Parameters
a - the input array to restructure into overlapping windows
windowsize - the size of each window of samples
stepsize - the number of samples to shift the window each step. If not
specified, this defaults to windowsize
    dopad - If false (default), leftover samples are returned separately.
If true, the input array is padded with zeros so that all
samples are used.
]
if compare[name[windowsize] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b19a8700>
if compare[name[stepsize] is constant[None]] begin[:]
variable[stepsize] assign[=] name[windowsize]
if compare[name[stepsize] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b19a89d0>
if <ast.UnaryOp object at 0x7da1b19a8b80> begin[:]
variable[a] assign[=] call[name[a].copy, parameter[]]
if <ast.BoolOp object at 0x7da1b19a8d90> begin[:]
return[tuple[[<ast.Call object at 0x7da1b19a8f40>, <ast.Name object at 0x7da1b19a9390>]]]
if <ast.BoolOp object at 0x7da1b19a9420> begin[:]
return[tuple[[<ast.Call object at 0x7da1b19a95d0>, <ast.Subscript object at 0x7da1b19a9720>]]]
variable[l] assign[=] call[name[a].shape][constant[0]]
if name[dopad] begin[:]
variable[p] assign[=] call[name[_wpad], parameter[name[l], name[windowsize], name[stepsize]]]
variable[a] assign[=] call[name[pad], parameter[name[a], name[p]]]
variable[leftover] assign[=] call[name[np].zeros, parameter[binary_operation[tuple[[<ast.Constant object at 0x7da1b19a86d0>]] + call[name[a].shape][<ast.Slice object at 0x7da1b19a8490>]]]]
if compare[constant[0] equal[==] call[name[a].shape][constant[0]]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b196a2c0>, <ast.Call object at 0x7da1b1968160>]]]
variable[n] assign[=] binary_operation[constant[1] + binary_operation[binary_operation[call[name[a].shape][constant[0]] - name[windowsize]] <ast.FloorDiv object at 0x7da2590d6bc0> name[stepsize]]]
variable[s] assign[=] call[name[a].strides][constant[0]]
variable[newshape] assign[=] binary_operation[tuple[[<ast.Name object at 0x7da1b196ab90>, <ast.Name object at 0x7da1b196b550>]] + call[name[a].shape][<ast.Slice object at 0x7da1b196b430>]]
variable[newstrides] assign[=] binary_operation[tuple[[<ast.BinOp object at 0x7da1b1968940>, <ast.Name object at 0x7da1b19680a0>]] + call[name[a].strides][<ast.Slice object at 0x7da1b19699f0>]]
variable[out] assign[=] call[name[np].ndarray.__new__, parameter[name[np].ndarray]]
return[tuple[[<ast.Name object at 0x7da1b196b4f0>, <ast.Name object at 0x7da1b196a530>]]] | keyword[def] identifier[windowed] ( identifier[a] , identifier[windowsize] , identifier[stepsize] = keyword[None] , identifier[dopad] = keyword[False] ):
literal[string]
keyword[if] identifier[windowsize] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[stepsize] keyword[is] keyword[None] :
identifier[stepsize] = identifier[windowsize]
keyword[if] identifier[stepsize] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[a] . identifier[flags] [ literal[string] ]:
identifier[a] = identifier[a] . identifier[copy] ()
keyword[if] identifier[windowsize] == literal[int] keyword[and] identifier[stepsize] == literal[int] :
keyword[return] identifier[np] . identifier[zeros] (( literal[int] ,)+ identifier[a] . identifier[shape] [ literal[int] :], identifier[dtype] = identifier[a] . identifier[dtype] ), identifier[a]
keyword[if] identifier[windowsize] == literal[int] keyword[and] identifier[stepsize] > literal[int] :
keyword[return] identifier[np] . identifier[zeros] ( literal[int] , identifier[dtype] = identifier[a] . identifier[dtype] ), identifier[a] [:: identifier[stepsize] ]
identifier[l] = identifier[a] . identifier[shape] [ literal[int] ]
keyword[if] identifier[dopad] :
identifier[p] = identifier[_wpad] ( identifier[l] , identifier[windowsize] , identifier[stepsize] )
identifier[a] = identifier[pad] ( identifier[a] , identifier[p] )
identifier[leftover] = identifier[np] . identifier[zeros] (( literal[int] ,)+ identifier[a] . identifier[shape] [ literal[int] :], identifier[dtype] = identifier[a] . identifier[dtype] )
keyword[else] :
identifier[c] , identifier[lc] = identifier[_wcut] ( identifier[l] , identifier[windowsize] , identifier[stepsize] )
identifier[leftover] = identifier[a] [ identifier[lc] :]
identifier[a] = identifier[a] [: identifier[c] ]
keyword[if] literal[int] == identifier[a] . identifier[shape] [ literal[int] ]:
keyword[return] identifier[leftover] , identifier[np] . identifier[zeros] ( identifier[a] . identifier[shape] , identifier[dtype] = identifier[a] . identifier[dtype] )
identifier[n] = literal[int] +( identifier[a] . identifier[shape] [ literal[int] ]- identifier[windowsize] )//( identifier[stepsize] )
identifier[s] = identifier[a] . identifier[strides] [ literal[int] ]
identifier[newshape] =( identifier[n] , identifier[windowsize] )+ identifier[a] . identifier[shape] [ literal[int] :]
identifier[newstrides] =( identifier[stepsize] * identifier[s] , identifier[s] )+ identifier[a] . identifier[strides] [ literal[int] :]
identifier[out] = identifier[np] . identifier[ndarray] . identifier[__new__] ( identifier[np] . identifier[ndarray] ,
identifier[strides] = identifier[newstrides] ,
identifier[shape] = identifier[newshape] ,
identifier[buffer] = identifier[a] ,
identifier[dtype] = identifier[a] . identifier[dtype] )
keyword[return] identifier[leftover] , identifier[out] | def windowed(a, windowsize, stepsize=None, dopad=False):
"""
Parameters
a - the input array to restructure into overlapping windows
windowsize - the size of each window of samples
stepsize - the number of samples to shift the window each step. If not
specified, this defaults to windowsize
    dopad - If false (default), leftover samples are returned separately.
If true, the input array is padded with zeros so that all
samples are used.
"""
if windowsize < 1:
raise ValueError('windowsize must be greater than or equal to one') # depends on [control=['if'], data=[]]
if stepsize is None:
stepsize = windowsize # depends on [control=['if'], data=['stepsize']]
if stepsize < 1:
raise ValueError('stepsize must be greater than or equal to one') # depends on [control=['if'], data=[]]
if not a.flags['C_CONTIGUOUS']:
a = a.copy() # depends on [control=['if'], data=[]]
if windowsize == 1 and stepsize == 1:
# A windowsize and stepsize of one mean that no windowing is necessary.
# Return the array unchanged.
return (np.zeros((0,) + a.shape[1:], dtype=a.dtype), a) # depends on [control=['if'], data=[]]
if windowsize == 1 and stepsize > 1:
return (np.zeros(0, dtype=a.dtype), a[::stepsize]) # depends on [control=['if'], data=[]]
# the original length of the input array
l = a.shape[0]
if dopad:
p = _wpad(l, windowsize, stepsize)
# pad the array with enough zeros so that there are no leftover samples
a = pad(a, p)
# no leftovers; an empty array
leftover = np.zeros((0,) + a.shape[1:], dtype=a.dtype) # depends on [control=['if'], data=[]]
else:
        # cut the array so that any leftover samples are returned separately
(c, lc) = _wcut(l, windowsize, stepsize)
leftover = a[lc:]
a = a[:c]
if 0 == a.shape[0]:
return (leftover, np.zeros(a.shape, dtype=a.dtype)) # depends on [control=['if'], data=[]]
n = 1 + (a.shape[0] - windowsize) // stepsize
s = a.strides[0]
newshape = (n, windowsize) + a.shape[1:]
newstrides = (stepsize * s, s) + a.strides[1:]
out = np.ndarray.__new__(np.ndarray, strides=newstrides, shape=newshape, buffer=a, dtype=a.dtype)
return (leftover, out) |
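The private helpers `_wpad`, `_wcut`, and `pad` are not shown in this row, so a direct call is not reproducible here; the same windowing arithmetic can be illustrated standalone with numpy's `sliding_window_view` (numpy 1.20+). The function above additionally returns the leftover samples, or zero-pads them away when `dopad=True`.

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

a = np.arange(10)

# Equivalent of windowed(a, windowsize=4, stepsize=2), minus the
# leftover handling: a strided view, so no samples are copied.
windows = sliding_window_view(a, 4)[::2]
print(windows)
# [[0 1 2 3]
#  [2 3 4 5]
#  [4 5 6 7]
#  [6 7 8 9]]
```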
def generate_symbolic_cmd_line_arg(state, max_length=1000):
"""
Generates a new symbolic cmd line argument string.
:return: The string reference.
"""
str_ref = SimSootValue_StringRef(state.memory.get_new_uuid())
str_sym = StringS("cmd_line_arg", max_length)
state.solver.add(str_sym != StringV(""))
state.memory.store(str_ref, str_sym)
return str_ref | def function[generate_symbolic_cmd_line_arg, parameter[state, max_length]]:
constant[
Generates a new symbolic cmd line argument string.
:return: The string reference.
]
variable[str_ref] assign[=] call[name[SimSootValue_StringRef], parameter[call[name[state].memory.get_new_uuid, parameter[]]]]
variable[str_sym] assign[=] call[name[StringS], parameter[constant[cmd_line_arg], name[max_length]]]
call[name[state].solver.add, parameter[compare[name[str_sym] not_equal[!=] call[name[StringV], parameter[constant[]]]]]]
call[name[state].memory.store, parameter[name[str_ref], name[str_sym]]]
return[name[str_ref]] | keyword[def] identifier[generate_symbolic_cmd_line_arg] ( identifier[state] , identifier[max_length] = literal[int] ):
literal[string]
identifier[str_ref] = identifier[SimSootValue_StringRef] ( identifier[state] . identifier[memory] . identifier[get_new_uuid] ())
identifier[str_sym] = identifier[StringS] ( literal[string] , identifier[max_length] )
identifier[state] . identifier[solver] . identifier[add] ( identifier[str_sym] != identifier[StringV] ( literal[string] ))
identifier[state] . identifier[memory] . identifier[store] ( identifier[str_ref] , identifier[str_sym] )
keyword[return] identifier[str_ref] | def generate_symbolic_cmd_line_arg(state, max_length=1000):
"""
Generates a new symbolic cmd line argument string.
:return: The string reference.
"""
str_ref = SimSootValue_StringRef(state.memory.get_new_uuid())
str_sym = StringS('cmd_line_arg', max_length)
state.solver.add(str_sym != StringV(''))
state.memory.store(str_ref, str_sym)
return str_ref |
def load_stream(self, stream):
"""
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
"""
# load the batches
for batch in self.serializer.load_stream(stream):
yield batch
# load the batch order indices
num = read_int(stream)
batch_order = []
for i in xrange(num):
index = read_int(stream)
batch_order.append(index)
yield batch_order | def function[load_stream, parameter[self, stream]]:
constant[
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
]
for taget[name[batch]] in starred[call[name[self].serializer.load_stream, parameter[name[stream]]]] begin[:]
<ast.Yield object at 0x7da20c7ca2c0>
variable[num] assign[=] call[name[read_int], parameter[name[stream]]]
variable[batch_order] assign[=] list[[]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[num]]]] begin[:]
variable[index] assign[=] call[name[read_int], parameter[name[stream]]]
call[name[batch_order].append, parameter[name[index]]]
<ast.Yield object at 0x7da20c7c89d0> | keyword[def] identifier[load_stream] ( identifier[self] , identifier[stream] ):
literal[string]
keyword[for] identifier[batch] keyword[in] identifier[self] . identifier[serializer] . identifier[load_stream] ( identifier[stream] ):
keyword[yield] identifier[batch]
identifier[num] = identifier[read_int] ( identifier[stream] )
identifier[batch_order] =[]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[num] ):
identifier[index] = identifier[read_int] ( identifier[stream] )
identifier[batch_order] . identifier[append] ( identifier[index] )
keyword[yield] identifier[batch_order] | def load_stream(self, stream):
"""
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
"""
# load the batches
for batch in self.serializer.load_stream(stream):
yield batch # depends on [control=['for'], data=['batch']]
# load the batch order indices
num = read_int(stream)
batch_order = []
for i in xrange(num):
index = read_int(stream)
batch_order.append(index) # depends on [control=['for'], data=[]]
yield batch_order |
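This matches how PySpark consumes the stream: every yielded item except the last is an Arrow RecordBatch, and the trailing list of indices restores the original partition order. A sketch of the consuming side, where `ser` and `stream` are assumed to exist:

```python
# 'ser' is an instance of the serializer class above; 'stream' is the
# file-like socket the JVM writes to (both assumed here).
results = list(ser.load_stream(stream))
batches, batch_order = results[:-1], results[-1]
ordered_batches = [batches[i] for i in batch_order]
```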
def select_larva(self):
"""Select all larva."""
action = sc_pb.Action()
action.action_ui.select_larva.SetInParent() # Adds the empty proto field.
return action | def function[select_larva, parameter[self]]:
constant[Select all larva.]
variable[action] assign[=] call[name[sc_pb].Action, parameter[]]
call[name[action].action_ui.select_larva.SetInParent, parameter[]]
return[name[action]] | keyword[def] identifier[select_larva] ( identifier[self] ):
literal[string]
identifier[action] = identifier[sc_pb] . identifier[Action] ()
identifier[action] . identifier[action_ui] . identifier[select_larva] . identifier[SetInParent] ()
keyword[return] identifier[action] | def select_larva(self):
"""Select all larva."""
action = sc_pb.Action()
action.action_ui.select_larva.SetInParent() # Adds the empty proto field.
return action |
def resource_headers(self, jobscript):
"""Given a :class:`~clusterjob.JobScript` instance, return a list of
lines that encode the resource requirements, to be added at the top of
the rendered job script
"""
resources = jobscript.resources
lines = []
cores_per_node = 1
nodes = 1
if 'ppn' in resources:
cores_per_node *= resources['ppn']
if 'threads' in resources:
cores_per_node *= resources['threads']
if 'nodes' in resources:
nodes = resources['nodes']
if len(set(['ppn', 'threads', 'nodes']).intersection(resources)) > 0:
lines.append('%s -l nodes=%d:ppn=%d'
% (self.prefix, nodes, cores_per_node))
for (key, val) in resources.items():
if key in ['nodes', 'threads', 'ppn']:
continue
if key in self.resource_replacements:
pbs_key = self.resource_replacements[key]
if key == 'mem':
val = str(val) + "m"
else:
pbs_key = key
if val is None:
continue
if type(val) is bool:
if val:
if not pbs_key.startswith('-'):
pbs_key = '-' + pbs_key
lines.append("%s %s" % (self.prefix, pbs_key))
else:
if not pbs_key.startswith('-'):
pbs_key = '-l %s=' % pbs_key
if pbs_key.endswith('='):
lines.append('%s %s%s' % (self.prefix, pbs_key, str(val)))
else:
lines.append('%s %s %s' % (self.prefix, pbs_key, str(val)))
return lines | def function[resource_headers, parameter[self, jobscript]]:
constant[Given a :class:`~clusterjob.JobScript` instance, return a list of
lines that encode the resource requirements, to be added at the top of
the rendered job script
]
variable[resources] assign[=] name[jobscript].resources
variable[lines] assign[=] list[[]]
variable[cores_per_node] assign[=] constant[1]
variable[nodes] assign[=] constant[1]
if compare[constant[ppn] in name[resources]] begin[:]
<ast.AugAssign object at 0x7da1b0bcba90>
if compare[constant[threads] in name[resources]] begin[:]
<ast.AugAssign object at 0x7da1b0bc8d90>
if compare[constant[nodes] in name[resources]] begin[:]
variable[nodes] assign[=] call[name[resources]][constant[nodes]]
if compare[call[name[len], parameter[call[call[name[set], parameter[list[[<ast.Constant object at 0x7da1b0bcb220>, <ast.Constant object at 0x7da1b0ba29e0>, <ast.Constant object at 0x7da1b0ba2a40>]]]].intersection, parameter[name[resources]]]]] greater[>] constant[0]] begin[:]
call[name[lines].append, parameter[binary_operation[constant[%s -l nodes=%d:ppn=%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0bc9a80>, <ast.Name object at 0x7da1b0bcadd0>, <ast.Name object at 0x7da1b0bcb9a0>]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0bc8a60>, <ast.Name object at 0x7da1b0bc8670>]]] in starred[call[name[resources].items, parameter[]]] begin[:]
if compare[name[key] in list[[<ast.Constant object at 0x7da1b0bc9c00>, <ast.Constant object at 0x7da1b0bc8b50>, <ast.Constant object at 0x7da1b0bc9240>]]] begin[:]
continue
if compare[name[key] in name[self].resource_replacements] begin[:]
variable[pbs_key] assign[=] call[name[self].resource_replacements][name[key]]
if compare[name[key] equal[==] constant[mem]] begin[:]
variable[val] assign[=] binary_operation[call[name[str], parameter[name[val]]] + constant[m]]
if compare[name[val] is constant[None]] begin[:]
continue
if compare[call[name[type], parameter[name[val]]] is name[bool]] begin[:]
if name[val] begin[:]
if <ast.UnaryOp object at 0x7da1b0bcb6d0> begin[:]
variable[pbs_key] assign[=] binary_operation[constant[-] + name[pbs_key]]
call[name[lines].append, parameter[binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0bc9990>, <ast.Name object at 0x7da1b0bca080>]]]]]
return[name[lines]] | keyword[def] identifier[resource_headers] ( identifier[self] , identifier[jobscript] ):
literal[string]
identifier[resources] = identifier[jobscript] . identifier[resources]
identifier[lines] =[]
identifier[cores_per_node] = literal[int]
identifier[nodes] = literal[int]
keyword[if] literal[string] keyword[in] identifier[resources] :
identifier[cores_per_node] *= identifier[resources] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[resources] :
identifier[cores_per_node] *= identifier[resources] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[resources] :
identifier[nodes] = identifier[resources] [ literal[string] ]
keyword[if] identifier[len] ( identifier[set] ([ literal[string] , literal[string] , literal[string] ]). identifier[intersection] ( identifier[resources] ))> literal[int] :
identifier[lines] . identifier[append] ( literal[string]
%( identifier[self] . identifier[prefix] , identifier[nodes] , identifier[cores_per_node] ))
keyword[for] ( identifier[key] , identifier[val] ) keyword[in] identifier[resources] . identifier[items] ():
keyword[if] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[continue]
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[resource_replacements] :
identifier[pbs_key] = identifier[self] . identifier[resource_replacements] [ identifier[key] ]
keyword[if] identifier[key] == literal[string] :
identifier[val] = identifier[str] ( identifier[val] )+ literal[string]
keyword[else] :
identifier[pbs_key] = identifier[key]
keyword[if] identifier[val] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[type] ( identifier[val] ) keyword[is] identifier[bool] :
keyword[if] identifier[val] :
keyword[if] keyword[not] identifier[pbs_key] . identifier[startswith] ( literal[string] ):
identifier[pbs_key] = literal[string] + identifier[pbs_key]
identifier[lines] . identifier[append] ( literal[string] %( identifier[self] . identifier[prefix] , identifier[pbs_key] ))
keyword[else] :
keyword[if] keyword[not] identifier[pbs_key] . identifier[startswith] ( literal[string] ):
identifier[pbs_key] = literal[string] % identifier[pbs_key]
keyword[if] identifier[pbs_key] . identifier[endswith] ( literal[string] ):
identifier[lines] . identifier[append] ( literal[string] %( identifier[self] . identifier[prefix] , identifier[pbs_key] , identifier[str] ( identifier[val] )))
keyword[else] :
identifier[lines] . identifier[append] ( literal[string] %( identifier[self] . identifier[prefix] , identifier[pbs_key] , identifier[str] ( identifier[val] )))
keyword[return] identifier[lines] | def resource_headers(self, jobscript):
"""Given a :class:`~clusterjob.JobScript` instance, return a list of
lines that encode the resource requirements, to be added at the top of
the rendered job script
"""
resources = jobscript.resources
lines = []
cores_per_node = 1
nodes = 1
if 'ppn' in resources:
cores_per_node *= resources['ppn'] # depends on [control=['if'], data=['resources']]
if 'threads' in resources:
cores_per_node *= resources['threads'] # depends on [control=['if'], data=['resources']]
if 'nodes' in resources:
nodes = resources['nodes'] # depends on [control=['if'], data=['resources']]
if len(set(['ppn', 'threads', 'nodes']).intersection(resources)) > 0:
lines.append('%s -l nodes=%d:ppn=%d' % (self.prefix, nodes, cores_per_node)) # depends on [control=['if'], data=[]]
for (key, val) in resources.items():
if key in ['nodes', 'threads', 'ppn']:
continue # depends on [control=['if'], data=[]]
if key in self.resource_replacements:
pbs_key = self.resource_replacements[key]
if key == 'mem':
val = str(val) + 'm' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['key']]
else:
pbs_key = key
if val is None:
continue # depends on [control=['if'], data=[]]
if type(val) is bool:
if val:
if not pbs_key.startswith('-'):
pbs_key = '-' + pbs_key # depends on [control=['if'], data=[]]
lines.append('%s %s' % (self.prefix, pbs_key)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if not pbs_key.startswith('-'):
pbs_key = '-l %s=' % pbs_key # depends on [control=['if'], data=[]]
if pbs_key.endswith('='):
lines.append('%s %s%s' % (self.prefix, pbs_key, str(val))) # depends on [control=['if'], data=[]]
else:
lines.append('%s %s %s' % (self.prefix, pbs_key, str(val))) # depends on [control=['for'], data=[]]
return lines |
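With the function above in scope, a stub backend and job script are enough to see the rendered header lines. The `#PBS` prefix and the replacement map below are assumptions modelled on a typical PBS setup, not values from the code; output order follows the `resources` dict.

```python
class StubBackend:
    prefix = "#PBS"                                   # assumed PBS prefix
    resource_replacements = {"mem": "mem",
                             "walltime": "walltime"}  # hypothetical map
    resource_headers = resource_headers               # borrow function above

class StubJobScript:
    resources = {"nodes": 2, "ppn": 8, "mem": 2048, "walltime": "01:00:00"}

for line in StubBackend().resource_headers(StubJobScript()):
    print(line)
# #PBS -l nodes=2:ppn=8
# #PBS -l mem=2048m
# #PBS -l walltime=01:00:00
```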
def setColorRamp(self, colorRamp=None):
"""
Set the color ramp of the raster converter instance
"""
if not colorRamp:
self._colorRamp = RasterConverter.setDefaultColorRamp(ColorRampEnum.COLOR_RAMP_HUE)
else:
self._colorRamp = colorRamp | def function[setColorRamp, parameter[self, colorRamp]]:
constant[
Set the color ramp of the raster converter instance
]
if <ast.UnaryOp object at 0x7da1b20a96c0> begin[:]
name[self]._colorRamp assign[=] call[name[RasterConverter].setDefaultColorRamp, parameter[name[ColorRampEnum].COLOR_RAMP_HUE]] | keyword[def] identifier[setColorRamp] ( identifier[self] , identifier[colorRamp] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[colorRamp] :
identifier[self] . identifier[_colorRamp] = identifier[RasterConverter] . identifier[setDefaultColorRamp] ( identifier[ColorRampEnum] . identifier[COLOR_RAMP_HUE] )
keyword[else] :
identifier[self] . identifier[_colorRamp] = identifier[colorRamp] | def setColorRamp(self, colorRamp=None):
"""
Set the color ramp of the raster converter instance
"""
if not colorRamp:
self._colorRamp = RasterConverter.setDefaultColorRamp(ColorRampEnum.COLOR_RAMP_HUE) # depends on [control=['if'], data=[]]
else:
self._colorRamp = colorRamp |
def del_all_svc_downtimes(self, service):
"""Delete all service downtime
Format of the line that triggers function call::
DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
for downtime in service.downtimes:
self.del_svc_downtime(downtime)
self.send_an_element(service.get_update_status_brok()) | def function[del_all_svc_downtimes, parameter[self, service]]:
constant[Delete all service downtime
Format of the line that triggers function call::
DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
]
for taget[name[downtime]] in starred[name[service].downtimes] begin[:]
call[name[self].del_svc_downtime, parameter[name[downtime]]]
call[name[self].send_an_element, parameter[call[name[service].get_update_status_brok, parameter[]]]] | keyword[def] identifier[del_all_svc_downtimes] ( identifier[self] , identifier[service] ):
literal[string]
keyword[for] identifier[downtime] keyword[in] identifier[service] . identifier[downtimes] :
identifier[self] . identifier[del_svc_downtime] ( identifier[downtime] )
identifier[self] . identifier[send_an_element] ( identifier[service] . identifier[get_update_status_brok] ()) | def del_all_svc_downtimes(self, service):
"""Delete all service downtime
Format of the line that triggers function call::
DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
for downtime in service.downtimes:
self.del_svc_downtime(downtime) # depends on [control=['for'], data=['downtime']]
self.send_an_element(service.get_update_status_brok()) |
def detect_keep_boundary(start, end, namespaces):
"""a helper to inspect a link and see if we should keep the link boundary
"""
result_start, result_end = False, False
parent_start = start.getparent()
parent_end = end.getparent()
if parent_start.tag == "{%s}p" % namespaces['text']:
# more than one child in the containing paragraph ?
# we keep the boundary
result_start = len(parent_start.getchildren()) > 1
if parent_end.tag == "{%s}p" % namespaces['text']:
# more than one child in the containing paragraph ?
# we keep the boundary
result_end = len(parent_end.getchildren()) > 1
return result_start, result_end | def function[detect_keep_boundary, parameter[start, end, namespaces]]:
constant[a helper to inspect a link and see if we should keep the link boundary
]
<ast.Tuple object at 0x7da1b26af520> assign[=] tuple[[<ast.Constant object at 0x7da1b26adae0>, <ast.Constant object at 0x7da1b26ad7b0>]]
variable[parent_start] assign[=] call[name[start].getparent, parameter[]]
variable[parent_end] assign[=] call[name[end].getparent, parameter[]]
if compare[name[parent_start].tag equal[==] binary_operation[constant[{%s}p] <ast.Mod object at 0x7da2590d6920> call[name[namespaces]][constant[text]]]] begin[:]
variable[result_start] assign[=] compare[call[name[len], parameter[call[name[parent_start].getchildren, parameter[]]]] greater[>] constant[1]]
if compare[name[parent_end].tag equal[==] binary_operation[constant[{%s}p] <ast.Mod object at 0x7da2590d6920> call[name[namespaces]][constant[text]]]] begin[:]
variable[result_end] assign[=] compare[call[name[len], parameter[call[name[parent_end].getchildren, parameter[]]]] greater[>] constant[1]]
return[tuple[[<ast.Name object at 0x7da18f00d510>, <ast.Name object at 0x7da18f00f670>]]] | keyword[def] identifier[detect_keep_boundary] ( identifier[start] , identifier[end] , identifier[namespaces] ):
literal[string]
identifier[result_start] , identifier[result_end] = keyword[False] , keyword[False]
identifier[parent_start] = identifier[start] . identifier[getparent] ()
identifier[parent_end] = identifier[end] . identifier[getparent] ()
keyword[if] identifier[parent_start] . identifier[tag] == literal[string] % identifier[namespaces] [ literal[string] ]:
identifier[result_start] = identifier[len] ( identifier[parent_start] . identifier[getchildren] ())> literal[int]
keyword[if] identifier[parent_end] . identifier[tag] == literal[string] % identifier[namespaces] [ literal[string] ]:
identifier[result_end] = identifier[len] ( identifier[parent_end] . identifier[getchildren] ())> literal[int]
keyword[return] identifier[result_start] , identifier[result_end] | def detect_keep_boundary(start, end, namespaces):
"""a helper to inspect a link and see if we should keep the link boundary
"""
(result_start, result_end) = (False, False)
parent_start = start.getparent()
parent_end = end.getparent()
if parent_start.tag == '{%s}p' % namespaces['text']:
# more than one child in the containing paragraph ?
# we keep the boundary
result_start = len(parent_start.getchildren()) > 1 # depends on [control=['if'], data=[]]
if parent_end.tag == '{%s}p' % namespaces['text']:
# more than one child in the containing paragraph ?
# we keep the boundary
result_end = len(parent_end.getchildren()) > 1 # depends on [control=['if'], data=[]]
return (result_start, result_end) |
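A small lxml check of the boundary rule, using the standard ODF text namespace; the function only needs `getparent()` and the child count, so two spans sharing one paragraph keep both boundaries:

```python
from lxml import etree

namespaces = {"text": "urn:oasis:names:tc:opendocument:xmlns:text:1.0"}
TEXT = "{%s}" % namespaces["text"]

p = etree.Element(TEXT + "p")
start = etree.SubElement(p, TEXT + "span")
end = etree.SubElement(p, TEXT + "span")

# Two children in the shared paragraph, so both boundaries are kept.
print(detect_keep_boundary(start, end, namespaces))  # (True, True)
```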
def commit_config(self, message=""):
"""Commit configuration."""
commit_args = {"comment": message} if message else {}
self.device.cu.commit(ignore_warning=self.ignore_warning, **commit_args)
if not self.lock_disable and not self.session_config_lock:
self._unlock() | def function[commit_config, parameter[self, message]]:
constant[Commit configuration.]
variable[commit_args] assign[=] <ast.IfExp object at 0x7da1b1c10b50>
call[name[self].device.cu.commit, parameter[]]
if <ast.BoolOp object at 0x7da1b1c13700> begin[:]
call[name[self]._unlock, parameter[]] | keyword[def] identifier[commit_config] ( identifier[self] , identifier[message] = literal[string] ):
literal[string]
identifier[commit_args] ={ literal[string] : identifier[message] } keyword[if] identifier[message] keyword[else] {}
identifier[self] . identifier[device] . identifier[cu] . identifier[commit] ( identifier[ignore_warning] = identifier[self] . identifier[ignore_warning] ,** identifier[commit_args] )
keyword[if] keyword[not] identifier[self] . identifier[lock_disable] keyword[and] keyword[not] identifier[self] . identifier[session_config_lock] :
identifier[self] . identifier[_unlock] () | def commit_config(self, message=''):
"""Commit configuration."""
commit_args = {'comment': message} if message else {}
self.device.cu.commit(ignore_warning=self.ignore_warning, **commit_args)
if not self.lock_disable and (not self.session_config_lock):
self._unlock() # depends on [control=['if'], data=[]] |
def cmd_devid(self, args):
'''decode device IDs from parameters'''
for p in self.mav_param.keys():
if p.startswith('COMPASS_DEV_ID'):
mp_util.decode_devid(self.mav_param[p], p)
if p.startswith('INS_') and p.endswith('_ID'):
mp_util.decode_devid(self.mav_param[p], p) | def function[cmd_devid, parameter[self, args]]:
constant[decode device IDs from parameters]
for taget[name[p]] in starred[call[name[self].mav_param.keys, parameter[]]] begin[:]
if call[name[p].startswith, parameter[constant[COMPASS_DEV_ID]]] begin[:]
call[name[mp_util].decode_devid, parameter[call[name[self].mav_param][name[p]], name[p]]]
if <ast.BoolOp object at 0x7da18f58d300> begin[:]
call[name[mp_util].decode_devid, parameter[call[name[self].mav_param][name[p]], name[p]]] | keyword[def] identifier[cmd_devid] ( identifier[self] , identifier[args] ):
literal[string]
keyword[for] identifier[p] keyword[in] identifier[self] . identifier[mav_param] . identifier[keys] ():
keyword[if] identifier[p] . identifier[startswith] ( literal[string] ):
identifier[mp_util] . identifier[decode_devid] ( identifier[self] . identifier[mav_param] [ identifier[p] ], identifier[p] )
keyword[if] identifier[p] . identifier[startswith] ( literal[string] ) keyword[and] identifier[p] . identifier[endswith] ( literal[string] ):
identifier[mp_util] . identifier[decode_devid] ( identifier[self] . identifier[mav_param] [ identifier[p] ], identifier[p] ) | def cmd_devid(self, args):
"""decode device IDs from parameters"""
for p in self.mav_param.keys():
if p.startswith('COMPASS_DEV_ID'):
mp_util.decode_devid(self.mav_param[p], p) # depends on [control=['if'], data=[]]
if p.startswith('INS_') and p.endswith('_ID'):
mp_util.decode_devid(self.mav_param[p], p) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] |
def to_foreign(self, obj, name, value):
"""Transform to a MongoDB-safe value."""
if isinstance(value, Iterable) and not isinstance(value, Mapping):
return self.List(super(Array, self).to_foreign(obj, name, i) for i in value)
return super(Array, self).to_foreign(obj, name, value) | def function[to_foreign, parameter[self, obj, name, value]]:
constant[Transform to a MongoDB-safe value.]
if <ast.BoolOp object at 0x7da18fe924a0> begin[:]
return[call[name[self].List, parameter[<ast.GeneratorExp object at 0x7da20e9549d0>]]]
return[call[call[name[super], parameter[name[Array], name[self]]].to_foreign, parameter[name[obj], name[name], name[value]]]] | keyword[def] identifier[to_foreign] ( identifier[self] , identifier[obj] , identifier[name] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Iterable] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[Mapping] ):
keyword[return] identifier[self] . identifier[List] ( identifier[super] ( identifier[Array] , identifier[self] ). identifier[to_foreign] ( identifier[obj] , identifier[name] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[value] )
keyword[return] identifier[super] ( identifier[Array] , identifier[self] ). identifier[to_foreign] ( identifier[obj] , identifier[name] , identifier[value] ) | def to_foreign(self, obj, name, value):
"""Transform to a MongoDB-safe value."""
if isinstance(value, Iterable) and (not isinstance(value, Mapping)):
return self.List((super(Array, self).to_foreign(obj, name, i) for i in value)) # depends on [control=['if'], data=[]]
return super(Array, self).to_foreign(obj, name, value) |
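The `isinstance` pair is the whole dispatch rule: any iterable that is not a mapping is coerced element-wise, everything else takes the scalar path. A standalone mirror of just that branch logic; note that plain strings are iterable too, so they also land in the element-wise branch here:

```python
from collections.abc import Iterable, Mapping

def branch(value):
    """Standalone mirror of the dispatch in to_foreign above."""
    if isinstance(value, Iterable) and not isinstance(value, Mapping):
        return "element-wise"
    return "scalar path"

print(branch([1, 2, 3]))  # element-wise
print(branch({"a": 1}))   # scalar path
print(branch(42))         # scalar path
print(branch("text"))     # element-wise: strings are iterable as well
```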
def destroy(self):
'''
Called from tearDown of simulation tests. Cleans up everything.
'''
defers = list()
for x in self.iter_agents():
defers.append(x.terminate_hard())
yield defer.DeferredList(defers)
yield self._journaler.close()
del(self._journaler)
del(self._jourwriter)
del(self._messaging)
del(self._tunneling_bridge)
del(self._database)
del(self._agencies)
del(self._breakpoints)
del(self._parser)
del(self._output) | def function[destroy, parameter[self]]:
constant[
Called from tearDown of simulation tests. Cleans up everything.
]
variable[defers] assign[=] call[name[list], parameter[]]
for taget[name[x]] in starred[call[name[self].iter_agents, parameter[]]] begin[:]
call[name[defers].append, parameter[call[name[x].terminate_hard, parameter[]]]]
<ast.Yield object at 0x7da18dc99000>
<ast.Yield object at 0x7da207f994b0>
<ast.Delete object at 0x7da207f9ac80>
<ast.Delete object at 0x7da207f9a0b0>
<ast.Delete object at 0x7da207f99f30>
<ast.Delete object at 0x7da18dc9b6d0>
<ast.Delete object at 0x7da18dc987f0>
<ast.Delete object at 0x7da18dc9a6e0>
<ast.Delete object at 0x7da18dc9a740>
<ast.Delete object at 0x7da18dc988e0>
<ast.Delete object at 0x7da18dc98b20> | keyword[def] identifier[destroy] ( identifier[self] ):
literal[string]
identifier[defers] = identifier[list] ()
keyword[for] identifier[x] keyword[in] identifier[self] . identifier[iter_agents] ():
identifier[defers] . identifier[append] ( identifier[x] . identifier[terminate_hard] ())
keyword[yield] identifier[defer] . identifier[DeferredList] ( identifier[defers] )
keyword[yield] identifier[self] . identifier[_journaler] . identifier[close] ()
keyword[del] ( identifier[self] . identifier[_journaler] )
keyword[del] ( identifier[self] . identifier[_jourwriter] )
keyword[del] ( identifier[self] . identifier[_messaging] )
keyword[del] ( identifier[self] . identifier[_tunneling_bridge] )
keyword[del] ( identifier[self] . identifier[_database] )
keyword[del] ( identifier[self] . identifier[_agencies] )
keyword[del] ( identifier[self] . identifier[_breakpoints] )
keyword[del] ( identifier[self] . identifier[_parser] )
keyword[del] ( identifier[self] . identifier[_output] ) | def destroy(self):
"""
Called from tearDown of simulation tests. Cleans up everything.
"""
defers = list()
for x in self.iter_agents():
defers.append(x.terminate_hard()) # depends on [control=['for'], data=['x']]
yield defer.DeferredList(defers)
yield self._journaler.close()
del self._journaler
del self._jourwriter
del self._messaging
del self._tunneling_bridge
del self._database
del self._agencies
del self._breakpoints
del self._parser
del self._output |
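The teardown above uses Twisted-style generator coroutines; a minimal sketch of the same gather-then-wait pattern, assuming Twisted is available (the agent objects and their terminate_hard method are placeholders):

# Minimal sketch of the DeferredList pattern used in destroy():
# fire every termination in parallel, then wait for all of them.
from twisted.internet import defer

@defer.inlineCallbacks
def terminate_all(agents):
    defers = [agent.terminate_hard() for agent in agents]
    yield defer.DeferredList(defers)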
def average(self, projection=None):
"""
Takes the average of elements in the sequence
>>> seq([1, 2]).average()
1.5
        >>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1])
        1.5
:param projection: function to project on the sequence before taking the average
:return: average of elements in the sequence
"""
length = self.size()
if projection:
return sum(self.map(projection)) / length
else:
return sum(self) / length | def function[average, parameter[self, projection]]:
constant[
Takes the average of elements in the sequence
>>> seq([1, 2]).average()
1.5
    >>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1])
    1.5
:param projection: function to project on the sequence before taking the average
:return: average of elements in the sequence
]
variable[length] assign[=] call[name[self].size, parameter[]]
if name[projection] begin[:]
return[binary_operation[call[name[sum], parameter[call[name[self].map, parameter[name[projection]]]]] / name[length]]] | keyword[def] identifier[average] ( identifier[self] , identifier[projection] = keyword[None] ):
literal[string]
identifier[length] = identifier[self] . identifier[size] ()
keyword[if] identifier[projection] :
keyword[return] identifier[sum] ( identifier[self] . identifier[map] ( identifier[projection] ))/ identifier[length]
keyword[else] :
keyword[return] identifier[sum] ( identifier[self] )/ identifier[length] | def average(self, projection=None):
"""
Takes the average of elements in the sequence
>>> seq([1, 2]).average()
1.5
    >>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1])
    1.5
:param projection: function to project on the sequence before taking the average
:return: average of elements in the sequence
"""
length = self.size()
if projection:
return sum(self.map(projection)) / length # depends on [control=['if'], data=[]]
else:
return sum(self) / length |
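For reference, a usage sketch, assuming the PyFunctional-style seq constructor this method is attached to:

# Usage sketch (assumes the functional package providing seq()):
from functional import seq

assert seq([1, 2, 3]).average() == 2.0
# With a projection, elements are mapped before the mean is taken:
assert seq([('a', 1), ('b', 3)]).average(lambda x: x[1]) == 2.0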
def vcs_rbridge_context_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcs_rbridge_context = ET.Element("vcs_rbridge_context")
config = vcs_rbridge_context
input = ET.SubElement(vcs_rbridge_context, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[vcs_rbridge_context_input_rbridge_id, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[vcs_rbridge_context] assign[=] call[name[ET].Element, parameter[constant[vcs_rbridge_context]]]
variable[config] assign[=] name[vcs_rbridge_context]
variable[input] assign[=] call[name[ET].SubElement, parameter[name[vcs_rbridge_context], constant[input]]]
variable[rbridge_id] assign[=] call[name[ET].SubElement, parameter[name[input], constant[rbridge-id]]]
name[rbridge_id].text assign[=] call[name[kwargs].pop, parameter[constant[rbridge_id]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[vcs_rbridge_context_input_rbridge_id] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[vcs_rbridge_context] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[vcs_rbridge_context]
identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[vcs_rbridge_context] , literal[string] )
identifier[rbridge_id] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] )
identifier[rbridge_id] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def vcs_rbridge_context_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
vcs_rbridge_context = ET.Element('vcs_rbridge_context')
config = vcs_rbridge_context
input = ET.SubElement(vcs_rbridge_context, 'input')
rbridge_id = ET.SubElement(input, 'rbridge-id')
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
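Since the helper is auto-generated, the XML it assembles is easiest to see in isolation; a self-contained sketch of the same ElementTree structure (the rbridge-id value "1" is a made-up example):

# Standalone sketch of the request body built above.
import xml.etree.ElementTree as ET

vcs_rbridge_context = ET.Element("vcs_rbridge_context")
input_el = ET.SubElement(vcs_rbridge_context, "input")
rbridge_id = ET.SubElement(input_el, "rbridge-id")
rbridge_id.text = "1"
print(ET.tostring(vcs_rbridge_context).decode())
# <vcs_rbridge_context><input><rbridge-id>1</rbridge-id></input></vcs_rbridge_context>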
def anonymized_formula(self):
"""
An anonymized formula. Appends charge to the end
of anonymized composition
"""
anon_formula = super().anonymized_formula
chg = self._charge
chg_str = ""
if chg > 0:
chg_str += ("{}{}".format('+', str(int(chg))))
elif chg < 0:
chg_str += ("{}{}".format('-', str(int(np.abs(chg)))))
return anon_formula + chg_str | def function[anonymized_formula, parameter[self]]:
constant[
An anonymized formula. Appends charge to the end
of anonymized composition
]
variable[anon_formula] assign[=] call[name[super], parameter[]].anonymized_formula
variable[chg] assign[=] name[self]._charge
variable[chg_str] assign[=] constant[]
if compare[name[chg] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da204566830>
return[binary_operation[name[anon_formula] + name[chg_str]]] | keyword[def] identifier[anonymized_formula] ( identifier[self] ):
literal[string]
identifier[anon_formula] = identifier[super] (). identifier[anonymized_formula]
identifier[chg] = identifier[self] . identifier[_charge]
identifier[chg_str] = literal[string]
keyword[if] identifier[chg] > literal[int] :
identifier[chg_str] +=( literal[string] . identifier[format] ( literal[string] , identifier[str] ( identifier[int] ( identifier[chg] ))))
keyword[elif] identifier[chg] < literal[int] :
identifier[chg_str] +=( literal[string] . identifier[format] ( literal[string] , identifier[str] ( identifier[int] ( identifier[np] . identifier[abs] ( identifier[chg] )))))
keyword[return] identifier[anon_formula] + identifier[chg_str] | def anonymized_formula(self):
"""
An anonymized formula. Appends charge to the end
of anonymized composition
"""
anon_formula = super().anonymized_formula
chg = self._charge
chg_str = ''
if chg > 0:
chg_str += '{}{}'.format('+', str(int(chg))) # depends on [control=['if'], data=['chg']]
elif chg < 0:
chg_str += '{}{}'.format('-', str(int(np.abs(chg)))) # depends on [control=['if'], data=['chg']]
return anon_formula + chg_str |
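The charge-suffix logic can be exercised on its own; a small sketch (numpy is only used to mirror the original's np.abs):

# Isolated sketch of the charge-suffix formatting used above.
import numpy as np

def charge_suffix(chg):
    if chg > 0:
        return "+{}".format(int(chg))
    if chg < 0:
        return "-{}".format(int(np.abs(chg)))
    return ""

assert charge_suffix(2) == "+2"
assert charge_suffix(-1) == "-1"
assert charge_suffix(0) == ""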
def _computeChart(chart, date):
""" Internal function to return a new chart for
a specific date using properties from old chart.
"""
pos = chart.pos
hsys = chart.hsys
IDs = [obj.id for obj in chart.objects]
return Chart(date, pos, IDs=IDs, hsys=hsys) | def function[_computeChart, parameter[chart, date]]:
constant[ Internal function to return a new chart for
a specific date using properties from old chart.
]
variable[pos] assign[=] name[chart].pos
variable[hsys] assign[=] name[chart].hsys
variable[IDs] assign[=] <ast.ListComp object at 0x7da1b2344f40>
return[call[name[Chart], parameter[name[date], name[pos]]]] | keyword[def] identifier[_computeChart] ( identifier[chart] , identifier[date] ):
literal[string]
identifier[pos] = identifier[chart] . identifier[pos]
identifier[hsys] = identifier[chart] . identifier[hsys]
identifier[IDs] =[ identifier[obj] . identifier[id] keyword[for] identifier[obj] keyword[in] identifier[chart] . identifier[objects] ]
keyword[return] identifier[Chart] ( identifier[date] , identifier[pos] , identifier[IDs] = identifier[IDs] , identifier[hsys] = identifier[hsys] ) | def _computeChart(chart, date):
""" Internal function to return a new chart for
a specific date using properties from old chart.
"""
pos = chart.pos
hsys = chart.hsys
IDs = [obj.id for obj in chart.objects]
return Chart(date, pos, IDs=IDs, hsys=hsys) |
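A hedged usage sketch, assuming the flatlib-style Chart/Datetime/GeoPos API this helper appears to target (the coordinates and dates are arbitrary examples):

# Hypothetical usage: recompute an existing chart for a new date,
# preserving its position, house system, and object list.
from flatlib.chart import Chart
from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos

pos = GeoPos('38n32', '8w54')
old = Chart(Datetime('2015/03/13', '17:00', '+00:00'), pos)
# _computeChart is the helper defined above:
new = _computeChart(old, Datetime('2016/03/13', '17:00', '+00:00'))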
def visit_module(self, node):
"""return an astroid.Module node as string"""
docs = '"""%s"""\n\n' % node.doc if node.doc else ""
return docs + "\n".join(n.accept(self) for n in node.body) + "\n\n" | def function[visit_module, parameter[self, node]]:
constant[return an astroid.Module node as string]
variable[docs] assign[=] <ast.IfExp object at 0x7da1b1e758a0>
return[binary_operation[binary_operation[name[docs] + call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da1b1e758d0>]]] + constant[
]]] | keyword[def] identifier[visit_module] ( identifier[self] , identifier[node] ):
literal[string]
identifier[docs] = literal[string] % identifier[node] . identifier[doc] keyword[if] identifier[node] . identifier[doc] keyword[else] literal[string]
keyword[return] identifier[docs] + literal[string] . identifier[join] ( identifier[n] . identifier[accept] ( identifier[self] ) keyword[for] identifier[n] keyword[in] identifier[node] . identifier[body] )+ literal[string] | def visit_module(self, node):
"""return an astroid.Module node as string"""
docs = '"""%s"""\n\n' % node.doc if node.doc else ''
return docs + '\n'.join((n.accept(self) for n in node.body)) + '\n\n' |
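A rough sketch of how such a visitor round-trips source, assuming an astroid-style accept() protocol that dispatches each node to visit_module and friends:

# Hypothetical sketch: the visitor turns a parsed module back into source.
import astroid

module = astroid.parse('"""Example module."""\nx = 1\n')
# A visitor instance defining visit_module (as above) would be invoked via
# module.accept(visitor); astroid's built-in as_string() does the same job:
print(module.as_string())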
def saveLastScreenImage(self):
""" Saves the last image taken on this region's screen to a temporary file """
bitmap = self.getLastScreenImage()
_, target_file = tempfile.mkstemp(".png")
cv2.imwrite(target_file, bitmap) | def function[saveLastScreenImage, parameter[self]]:
constant[ Saves the last image taken on this region's screen to a temporary file ]
variable[bitmap] assign[=] call[name[self].getLastScreenImage, parameter[]]
<ast.Tuple object at 0x7da20e962860> assign[=] call[name[tempfile].mkstemp, parameter[constant[.png]]]
call[name[cv2].imwrite, parameter[name[target_file], name[bitmap]]] | keyword[def] identifier[saveLastScreenImage] ( identifier[self] ):
literal[string]
identifier[bitmap] = identifier[self] . identifier[getLastScreenImage] ()
identifier[_] , identifier[target_file] = identifier[tempfile] . identifier[mkstemp] ( literal[string] )
identifier[cv2] . identifier[imwrite] ( identifier[target_file] , identifier[bitmap] ) | def saveLastScreenImage(self):
""" Saves the last image taken on this region's screen to a temporary file """
bitmap = self.getLastScreenImage()
(_, target_file) = tempfile.mkstemp('.png')
cv2.imwrite(target_file, bitmap) |
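The temp-file pattern in isolation; a runnable sketch with a synthetic bitmap standing in for the captured screen (cv2 and numpy assumed installed):

# Standalone sketch of the save-to-temp-PNG pattern used above.
import tempfile
import cv2
import numpy as np

bitmap = np.zeros((32, 32, 3), dtype=np.uint8)  # stand-in for getLastScreenImage()
_, target_file = tempfile.mkstemp(".png")
cv2.imwrite(target_file, bitmap)
print("wrote", target_file)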
def create(dataset, annotations=None, feature=None, model='darknet-yolo',
classes=None, batch_size=0, max_iterations=0, verbose=True,
**kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
    model : string, optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
    classes : list, optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
        The number of training iterations. If 0, then it will automatically
        be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
    # Make predictions on the training set and add as a column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, "dataset")
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
from .._mxnet import _mxnet_utils
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature)
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset,
target_type=[list, dict],
type_name='list',
col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations)
_raise_error_if_not_detection_sframe(dataset, feature, annotations,
require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model,
supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
params = {
'anchors': [
(1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
(2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
(4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
(8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
(16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
],
'grid_shape': [13, 13],
'aug_resize': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_min_object_covered': 0,
'aug_min_eject_coverage': 0.5,
'aug_area_range': (.15, 2),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
'lmb_coord_xy': 10.0,
'lmb_coord_wh': 10.0,
'lmb_obj': 100.0,
'lmb_noobj': 5.0,
'lmb_class': 2.0,
'non_maximum_suppression_threshold': 0.45,
'rescore': True,
'clip_gradients': 0.025,
'weight_decay': 0.0005,
'sgd_momentum': 0.9,
'learning_rate': 1.0e-3,
'shuffle': True,
'mps_loss_mult': 8,
# This large buffer size (8 batches) is an attempt to mitigate against
# the SFrame shuffle operation that can occur after each epoch.
'io_thread_buffer_size': 8,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
anchors = params['anchors']
num_anchors = len(anchors)
if batch_size < 1:
batch_size = 32 # Default if not user-specified
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=batch_size)
num_mxnet_gpus = len(cuda_gpus)
use_mps = _use_mps() and num_mxnet_gpus == 0
batch_size_each = batch_size // max(num_mxnet_gpus, 1)
if use_mps and _mps_device_memory_limit() < 4 * 1024 * 1024 * 1024:
# Reduce batch size for GPUs with less than 4GB RAM
batch_size_each = 16
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
if verbose:
print("Setting 'batch_size' to {}".format(batch_size))
# The IO thread also handles MXNet-powered data augmentation. This seems
# to be problematic to run independently of a MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 550 + batch_size_each * 85
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=use_mps,
cuda_mem_req=cuda_mem_req)
grid_shape = params['grid_shape']
input_image_shape = (3,
grid_shape[0] * ref_model.spatial_reduction,
grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = (dataset.stack(annotations, new_column_name='_bbox', drop_na=True)
.unpack('_bbox', limit=['label']))
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label'])
except (TypeError, RuntimeError):
        # If this fails, the annotation format is invalid at the coarsest level
        raise _ToolkitError("Annotations format is invalid. Must be a list of "
            "dictionaries or a single dictionary containing 'label' and 'coordinates'.")
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique()
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for index, name in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations))
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset,
batch_size=batch_size,
input_shape=input_image_shape[1:],
output_shape=grid_shape,
anchors=anchors,
class_to_index=class_to_index,
aug_params=params,
shuffle=params['shuffle'],
loader_type='augmented',
feature_column=feature,
annotations_column=annotations,
io_thread_buffer_size=io_thread_buffer_size,
iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:],
output_shape=grid_shape,
batch_size=batch_size_each,
num_classes=num_classes,
anchors=anchors,
parameters=params)
base_lr = params['learning_rate']
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10**(-i)) for i, step in enumerate(steps)]
steps, factors = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(0.00005), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
iteration_base1 = iteration + 1
if progress['smoothed_loss'] is None:
progress['smoothed_loss'] = cur_loss
else:
progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
cur_time = _time.time()
# Printing of table header is deferred, so that start-of-training
# warnings appear above the table
if verbose and iteration == 0:
# Print progress table header
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
if verbose and (cur_time > progress['last_time'] + 10 or
iteration_base1 == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter=iteration_base1, loss=progress['smoothed_loss'],
                time=elapsed_time, width=column_width-1))
progress['last_time'] = cur_time
if use_mps:
# Force initialization of net_params
# TODO: Do not rely on MXNet to initialize MPS-based network
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = net_params[k].data().asnumpy()
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
mps_loss_mult = params['mps_loss_mult']
mps_config = {
'mode': _MpsGraphMode.Train,
'use_sgd': True,
'learning_rate': base_lr / params['mps_loss_mult'],
'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult,
'weight_decay': params['weight_decay'],
'od_include_network': True,
'od_include_loss': True,
'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult,
'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult,
'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult,
'od_scale_object': params['lmb_obj'] * mps_loss_mult,
'od_scale_class': params['lmb_class'] * mps_loss_mult,
'od_max_iou_for_no_object': 0.3,
'od_min_iou_for_object': 0.7,
'od_rescore': params['rescore'],
}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape,
batch_size=batch_size,
output_size=output_size,
anchors=anchors,
config=mps_config,
weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
# Once a batch is loaded into NumPy, pass it immediately to the
# numpy_worker so that we can start I/O and decoding for the next
# batch.
for batch in loader:
mxnet_batch_queue.put(batch)
mxnet_batch_queue.put(None)
def numpy_worker():
while True:
batch = mxnet_batch_queue.get()
if batch is None:
break
for x, y in zip(batch.data, batch.label):
# Convert to NumPy arrays with required shapes. Note that
# asnumpy waits for any pending MXNet operations to finish.
input_data = _mxnet_to_mps(x.asnumpy())
label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
# Convert to packed 32-bit arrays.
input_data = input_data.astype(_np.float32)
if not input_data.flags.c_contiguous:
input_data = input_data.copy()
label_data = label_data.astype(_np.float32)
if not label_data.flags.c_contiguous:
label_data = label_data.copy()
# Push this batch to the main thread.
numpy_batch_queue.put({'input' : input_data,
'label' : label_data,
'iteration' : batch.iteration})
# Tell the main thread there's no more data.
numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batch_queue = []
def wait_for_batch():
pending_loss = batch_queue.pop(0)
batch_loss = pending_loss.asnumpy() # Waits for the batch to finish
return batch_loss.sum() / mps_loss_mult
while True:
batch = numpy_batch_queue.get()
if batch is None:
break
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult)
            # Submit this batch to MPS.
batch_queue.append(mps_net.train(batch['input'], batch['label']))
# If we have two batches in flight, wait for the first one.
if len(batch_queue) > 1:
cur_loss = wait_for_batch()
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration)
iteration = batch['iteration']
# Wait for any pending batches and finalize our progress updates.
while len(batch_queue) > 0:
cur_loss = wait_for_batch()
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(mps_net_params[k])
    else:  # Use MXNet
net.hybridize()
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler,
'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for x, y in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L)
for L in Ls:
L.backward()
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer
# Save the model
training_iterations = iteration + 1
state = {
'_model': net,
'_class_to_index': class_to_index,
'_training_time_as_string': _seconds_as_string(training_time),
'_grid_shape': grid_shape,
'anchors': anchors,
'model': model,
'classes': classes,
'batch_size': batch_size,
'input_image_shape': input_image_shape,
'feature': feature,
'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'],
'annotations': annotations,
'num_classes': num_classes,
'num_examples': num_images,
'num_bounding_boxes': num_instances,
'training_time': training_time,
'training_epochs': training_iterations * batch_size // num_images,
'training_iterations': training_iterations,
'max_iterations': max_iterations,
'training_loss': progress['smoothed_loss'],
}
return ObjectDetector(state) | def function[create, parameter[dataset, annotations, feature, model, classes, batch_size, max_iterations, verbose]]:
constant[
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
    model : string, optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
    classes : list, optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
        The number of training iterations. If 0, then it will automatically
        be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
    # Make predictions on the training set and add as a column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
]
call[name[_raise_error_if_not_sframe], parameter[name[dataset], constant[dataset]]]
from relative_module[_mx_detector] import module[YOLOLoss]
from relative_module[_model] import module[tiny_darknet]
from relative_module[_sframe_loader] import module[SFrameDetectionIter]
from relative_module[_manual_scheduler] import module[ManualScheduler]
import module[mxnet] as alias[_mx]
from relative_module[_mxnet] import module[_mxnet_utils]
if compare[call[name[len], parameter[name[dataset]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1fbae90>
call[name[_numeric_param_check_range], parameter[constant[max_iterations], name[max_iterations], constant[0], name[_six].MAXSIZE]]
variable[start_time] assign[=] call[name[_time].time, parameter[]]
variable[supported_detectors] assign[=] list[[<ast.Constant object at 0x7da1b1fbaaa0>]]
if compare[name[feature] is constant[None]] begin[:]
variable[feature] assign[=] call[name[_tkutl]._find_only_image_column, parameter[name[dataset]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[Using '%s' as feature column] <ast.Mod object at 0x7da2590d6920> name[feature]]]]
if compare[name[annotations] is constant[None]] begin[:]
variable[annotations] assign[=] call[name[_tkutl]._find_only_column_of_type, parameter[name[dataset]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[Using '%s' as annotations column] <ast.Mod object at 0x7da2590d6920> name[annotations]]]]
call[name[_raise_error_if_not_detection_sframe], parameter[name[dataset], name[feature], name[annotations]]]
variable[is_annotations_list] assign[=] compare[call[name[dataset]][name[annotations]].dtype equal[==] name[list]]
call[name[_tkutl]._check_categorical_option_type, parameter[constant[model], name[model], name[supported_detectors]]]
variable[base_model] assign[=] call[call[name[model].split, parameter[constant[-], constant[1]]]][constant[0]]
variable[ref_model] assign[=] call[call[name[_pre_trained_models].OBJECT_DETECTION_BASE_MODELS][name[base_model]], parameter[]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1fb9990>, <ast.Constant object at 0x7da1b1fb9960>, <ast.Constant object at 0x7da1b1fb9930>, <ast.Constant object at 0x7da1b1fb9900>, <ast.Constant object at 0x7da1b1fb98d0>, <ast.Constant object at 0x7da1b1fb98a0>, <ast.Constant object at 0x7da1b1fb9870>, <ast.Constant object at 0x7da1b1fb9840>, <ast.Constant object at 0x7da1b1fb9810>, <ast.Constant object at 0x7da1b1fb97e0>, <ast.Constant object at 0x7da1b1fb97b0>, <ast.Constant object at 0x7da1b1fb9780>, <ast.Constant object at 0x7da1b1fb9750>, <ast.Constant object at 0x7da1b1fb9720>, <ast.Constant object at 0x7da1b1fb96f0>, <ast.Constant object at 0x7da1b1fb96c0>, <ast.Constant object at 0x7da1b1fb9690>, <ast.Constant object at 0x7da1b1fb9660>, <ast.Constant object at 0x7da1b1fb9630>, <ast.Constant object at 0x7da1b1fb9600>, <ast.Constant object at 0x7da1b1fb95d0>, <ast.Constant object at 0x7da1b1fb95a0>, <ast.Constant object at 0x7da1b1fb9570>, <ast.Constant object at 0x7da1b1fb9540>, <ast.Constant object at 0x7da1b1fb9510>, <ast.Constant object at 0x7da1b1fb94e0>, <ast.Constant object at 0x7da1b1fb94b0>, <ast.Constant object at 0x7da1b1fb9480>, <ast.Constant object at 0x7da1b1fb9450>, <ast.Constant object at 0x7da1b1fb9420>, <ast.Constant object at 0x7da1b1fb93f0>, <ast.Constant object at 0x7da1b1fb93c0>], [<ast.List object at 0x7da1b1fb9390>, <ast.List object at 0x7da1b1fb8af0>, <ast.Constant object at 0x7da1b1fb8a60>, <ast.Constant object at 0x7da1b1fb8a30>, <ast.Constant object at 0x7da1b1fb8a00>, <ast.Constant object at 0x7da1b1fb89d0>, <ast.Constant object at 0x7da1b1fb89a0>, <ast.Constant object at 0x7da1b1fb8970>, <ast.Constant object at 0x7da1b1fb8940>, <ast.Constant object at 0x7da1b1fb8910>, <ast.Constant object at 0x7da1b1fb88e0>, <ast.Constant object at 0x7da1b1fb88b0>, <ast.Constant object at 0x7da1b1fb8880>, <ast.Constant object at 0x7da1b1fb8850>, <ast.Tuple object at 0x7da1b1fb8820>, <ast.Constant object at 0x7da1b1fb8790>, <ast.Constant object at 0x7da1b1fb8760>, <ast.Constant object at 0x7da1b1fb8730>, <ast.Constant object at 0x7da1b1fb8700>, <ast.Constant object at 0x7da1b1fb86d0>, <ast.Constant object at 0x7da1b1fb86a0>, <ast.Constant object at 0x7da1b1fb8670>, <ast.Constant object at 0x7da1b1fb8640>, <ast.Constant object at 0x7da1b1fb8610>, <ast.Constant object at 0x7da1b1fb85e0>, <ast.Constant object at 0x7da1b1fb85b0>, <ast.Constant object at 0x7da1b1fb8580>, <ast.Constant object at 0x7da1b1fb8550>, <ast.Constant object at 0x7da1b1fb8520>, <ast.Constant object at 0x7da1b1fb84f0>, <ast.Constant object at 0x7da1b1fb84c0>, <ast.Constant object at 0x7da1b1fb8490>]]
if compare[constant[_advanced_parameters] in name[kwargs]] begin[:]
variable[new_keys] assign[=] call[name[set], parameter[call[call[name[kwargs]][constant[_advanced_parameters]].keys, parameter[]]]]
variable[set_keys] assign[=] call[name[set], parameter[call[name[params].keys, parameter[]]]]
variable[unsupported] assign[=] binary_operation[name[new_keys] - name[set_keys]]
if name[unsupported] begin[:]
<ast.Raise object at 0x7da1b1f33ee0>
call[name[params].update, parameter[call[name[kwargs]][constant[_advanced_parameters]]]]
variable[anchors] assign[=] call[name[params]][constant[anchors]]
variable[num_anchors] assign[=] call[name[len], parameter[name[anchors]]]
if compare[name[batch_size] less[<] constant[1]] begin[:]
variable[batch_size] assign[=] constant[32]
variable[cuda_gpus] assign[=] call[name[_mxnet_utils].get_gpus_in_use, parameter[]]
variable[num_mxnet_gpus] assign[=] call[name[len], parameter[name[cuda_gpus]]]
variable[use_mps] assign[=] <ast.BoolOp object at 0x7da1b1f33640>
variable[batch_size_each] assign[=] binary_operation[name[batch_size] <ast.FloorDiv object at 0x7da2590d6bc0> call[name[max], parameter[name[num_mxnet_gpus], constant[1]]]]
if <ast.BoolOp object at 0x7da1b1fae050> begin[:]
variable[batch_size_each] assign[=] constant[16]
variable[batch_size] assign[=] binary_operation[call[name[max], parameter[name[num_mxnet_gpus], constant[1]]] * name[batch_size_each]]
if name[verbose] begin[:]
call[name[print], parameter[call[constant[Setting 'batch_size' to {}].format, parameter[name[batch_size]]]]]
variable[io_thread_buffer_size] assign[=] <ast.IfExp object at 0x7da1b1fafb50>
if name[verbose] begin[:]
variable[cuda_mem_req] assign[=] binary_operation[constant[550] + binary_operation[name[batch_size_each] * constant[85]]]
call[name[_tkutl]._print_neural_compute_device, parameter[]]
variable[grid_shape] assign[=] call[name[params]][constant[grid_shape]]
variable[input_image_shape] assign[=] tuple[[<ast.Constant object at 0x7da1b1fac400>, <ast.BinOp object at 0x7da1b1facbe0>, <ast.BinOp object at 0x7da1b1fade70>]]
<ast.Try object at 0x7da1b1fac3a0>
variable[num_images] assign[=] call[name[len], parameter[name[dataset]]]
variable[num_instances] assign[=] call[name[len], parameter[name[instances]]]
if compare[name[classes] is constant[None]] begin[:]
variable[classes] assign[=] call[call[name[instances]][constant[_bbox.label]].unique, parameter[]]
variable[classes] assign[=] call[name[sorted], parameter[name[classes]]]
variable[class_to_index] assign[=] <ast.DictComp object at 0x7da1b1facdc0>
variable[num_classes] assign[=] call[name[len], parameter[name[classes]]]
if compare[name[max_iterations] equal[==] constant[0]] begin[:]
variable[num_iterations_raw] assign[=] binary_operation[binary_operation[constant[5000] * call[name[_np].sqrt, parameter[name[num_instances]]]] / name[batch_size]]
variable[num_iterations] assign[=] binary_operation[constant[1000] * call[name[max], parameter[constant[1], call[name[int], parameter[call[name[round], parameter[binary_operation[name[num_iterations_raw] / constant[1000]]]]]]]]]
if name[verbose] begin[:]
call[name[print], parameter[call[constant[Setting 'max_iterations' to {}].format, parameter[name[num_iterations]]]]]
variable[loader] assign[=] call[name[_SFrameDetectionIter], parameter[name[dataset]]]
variable[preds_per_box] assign[=] binary_operation[constant[5] + name[num_classes]]
variable[output_size] assign[=] binary_operation[name[preds_per_box] * name[num_anchors]]
variable[ymap_shape] assign[=] binary_operation[binary_operation[tuple[[<ast.Name object at 0x7da1b20ef430>]] + call[name[tuple], parameter[name[grid_shape]]]] + tuple[[<ast.Name object at 0x7da1b20ed840>, <ast.Name object at 0x7da1b20eddb0>]]]
variable[net] assign[=] call[name[_tiny_darknet], parameter[]]
variable[loss] assign[=] call[name[_YOLOLoss], parameter[]]
variable[base_lr] assign[=] call[name[params]][constant[learning_rate]]
variable[steps] assign[=] list[[<ast.BinOp object at 0x7da1b20ede40>, <ast.BinOp object at 0x7da1b20ee170>, <ast.Name object at 0x7da1b20ed180>]]
variable[steps_and_factors] assign[=] <ast.ListComp object at 0x7da1b20ec670>
<ast.Tuple object at 0x7da1b20ed780> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b20ed540>]]
variable[lr_scheduler] assign[=] call[name[_ManualScheduler], parameter[]]
variable[ctx] assign[=] call[name[_mxnet_utils].get_mxnet_context, parameter[]]
variable[net_params] assign[=] call[name[net].collect_params, parameter[]]
call[name[net_params].initialize, parameter[call[name[_mx].init.Xavier, parameter[]]]]
call[call[name[net_params]][constant[conv7_weight]].initialize, parameter[call[name[_mx].init.Xavier, parameter[]]]]
call[call[name[net_params]][constant[conv8_weight]].initialize, parameter[call[name[_mx].init.Uniform, parameter[constant[5e-05]]]]]
variable[bias] assign[=] call[name[_np].zeros, parameter[name[output_size]]]
<ast.AugAssign object at 0x7da1b1fe7910>
from relative_module[_mx_detector] import module[ConstantArray]
call[call[name[net_params]][constant[conv8_bias]].initialize, parameter[call[name[ConstantArray], parameter[name[bias]]], name[ctx]]]
call[call[name[ref_model].available_parameters_subset, parameter[name[net_params]]].load, parameter[name[ref_model].model_path, name[ctx]]]
variable[column_names] assign[=] list[[<ast.Constant object at 0x7da1b1fe7ac0>, <ast.Constant object at 0x7da1b1fe71c0>, <ast.Constant object at 0x7da1b1fe6f50>]]
variable[num_columns] assign[=] call[name[len], parameter[name[column_names]]]
variable[column_width] assign[=] binary_operation[call[name[max], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b1f71ab0>, name[column_names]]]]] + constant[2]]
variable[hr] assign[=] binary_operation[binary_operation[constant[+] + call[constant[+].join, parameter[binary_operation[list[[<ast.BinOp object at 0x7da1b1f71690>]] * name[num_columns]]]]] + constant[+]]
variable[progress] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f71cf0>, <ast.Constant object at 0x7da1b1f70af0>], [<ast.Constant object at 0x7da1b1f71720>, <ast.Constant object at 0x7da1b1f71c60>]]
variable[iteration] assign[=] constant[0]
def function[update_progress, parameter[cur_loss, iteration]]:
variable[iteration_base1] assign[=] binary_operation[name[iteration] + constant[1]]
if compare[call[name[progress]][constant[smoothed_loss]] is constant[None]] begin[:]
call[name[progress]][constant[smoothed_loss]] assign[=] name[cur_loss]
variable[cur_time] assign[=] call[name[_time].time, parameter[]]
if <ast.BoolOp object at 0x7da1b208c6d0> begin[:]
call[name[print], parameter[name[hr]]]
call[name[print], parameter[call[binary_operation[binary_operation[constant[| {:<{width}}] * name[num_columns]] + constant[|]].format, parameter[<ast.Starred object at 0x7da1b208c0d0>]]]]
call[name[print], parameter[name[hr]]]
if <ast.BoolOp object at 0x7da1b208c280> begin[:]
variable[elapsed_time] assign[=] binary_operation[name[cur_time] - name[start_time]]
call[name[print], parameter[call[constant[| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|].format, parameter[]]]]
call[name[progress]][constant[last_time]] assign[=] name[cur_time]
if name[use_mps] begin[:]
call[name[net].forward, parameter[call[name[_mx].nd.uniform, parameter[constant[0], constant[1], binary_operation[tuple[[<ast.Name object at 0x7da1b1f8f670>]] + name[input_image_shape]]]]]]
variable[mps_net_params] assign[=] dictionary[[], []]
variable[keys] assign[=] call[name[list], parameter[name[net_params]]]
for taget[name[k]] in starred[name[keys]] begin[:]
call[name[mps_net_params]][name[k]] assign[=] call[call[call[name[net_params]][name[k]].data, parameter[]].asnumpy, parameter[]]
variable[mps_loss_mult] assign[=] call[name[params]][constant[mps_loss_mult]]
variable[mps_config] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f8d690>, <ast.Constant object at 0x7da1b1f8d4b0>, <ast.Constant object at 0x7da1b1f8f610>, <ast.Constant object at 0x7da1b1f8f040>, <ast.Constant object at 0x7da1b1f8f580>, <ast.Constant object at 0x7da1b1f8edd0>, <ast.Constant object at 0x7da1b1f8fd60>, <ast.Constant object at 0x7da1b1f8d0f0>, <ast.Constant object at 0x7da1b1f8f850>, <ast.Constant object at 0x7da1b1f8f1f0>, <ast.Constant object at 0x7da1b1f8e710>, <ast.Constant object at 0x7da1b1f8c640>, <ast.Constant object at 0x7da1b1f8eb60>, <ast.Constant object at 0x7da1b1f8dd20>, <ast.Constant object at 0x7da1b1f8f2b0>], [<ast.Attribute object at 0x7da1b1f8ea70>, <ast.Constant object at 0x7da1b1f8ce50>, <ast.BinOp object at 0x7da1b1f8d780>, <ast.BinOp object at 0x7da1b1f8dcc0>, <ast.Subscript object at 0x7da1b1f8dc30>, <ast.Constant object at 0x7da1b1f8f070>, <ast.Constant object at 0x7da1b1f8ded0>, <ast.BinOp object at 0x7da1b1f8fd30>, <ast.BinOp object at 0x7da1b1f8c160>, <ast.BinOp object at 0x7da1b1f8c8e0>, <ast.BinOp object at 0x7da1b1f8c4c0>, <ast.BinOp object at 0x7da1b1f8e0b0>, <ast.Constant object at 0x7da1b1f8fee0>, <ast.Constant object at 0x7da1b1f8f910>, <ast.Subscript object at 0x7da1b1f8efe0>]]
variable[mps_net] assign[=] call[name[_get_mps_od_net], parameter[]]
variable[mxnet_batch_queue] assign[=] call[name[_Queue], parameter[constant[1]]]
variable[numpy_batch_queue] assign[=] call[name[_Queue], parameter[constant[1]]]
def function[sframe_worker, parameter[]]:
for taget[name[batch]] in starred[name[loader]] begin[:]
call[name[mxnet_batch_queue].put, parameter[name[batch]]]
call[name[mxnet_batch_queue].put, parameter[constant[None]]]
def function[numpy_worker, parameter[]]:
while constant[True] begin[:]
variable[batch] assign[=] call[name[mxnet_batch_queue].get, parameter[]]
if compare[name[batch] is constant[None]] begin[:]
break
for taget[tuple[[<ast.Name object at 0x7da1b205abc0>, <ast.Name object at 0x7da1b205b4f0>]]] in starred[call[name[zip], parameter[name[batch].data, name[batch].label]]] begin[:]
variable[input_data] assign[=] call[name[_mxnet_to_mps], parameter[call[name[x].asnumpy, parameter[]]]]
variable[label_data] assign[=] call[call[name[y].asnumpy, parameter[]].reshape, parameter[binary_operation[call[name[y].shape][<ast.Slice object at 0x7da1b2059780>] + tuple[[<ast.UnaryOp object at 0x7da1b205bbb0>]]]]]
variable[input_data] assign[=] call[name[input_data].astype, parameter[name[_np].float32]]
if <ast.UnaryOp object at 0x7da1b205b4c0> begin[:]
variable[input_data] assign[=] call[name[input_data].copy, parameter[]]
variable[label_data] assign[=] call[name[label_data].astype, parameter[name[_np].float32]]
if <ast.UnaryOp object at 0x7da1b20599c0> begin[:]
variable[label_data] assign[=] call[name[label_data].copy, parameter[]]
call[name[numpy_batch_queue].put, parameter[dictionary[[<ast.Constant object at 0x7da1b2058130>, <ast.Constant object at 0x7da1b205b130>, <ast.Constant object at 0x7da1b205a650>], [<ast.Name object at 0x7da1b205beb0>, <ast.Name object at 0x7da1b205a410>, <ast.Attribute object at 0x7da1b205a3b0>]]]]
call[name[numpy_batch_queue].put, parameter[constant[None]]]
variable[sframe_worker_thread] assign[=] call[name[_Thread], parameter[]]
call[name[sframe_worker_thread].start, parameter[]]
variable[numpy_worker_thread] assign[=] call[name[_Thread], parameter[]]
call[name[numpy_worker_thread].start, parameter[]]
variable[batch_queue] assign[=] list[[]]
def function[wait_for_batch, parameter[]]:
variable[pending_loss] assign[=] call[name[batch_queue].pop, parameter[constant[0]]]
variable[batch_loss] assign[=] call[name[pending_loss].asnumpy, parameter[]]
return[binary_operation[call[name[batch_loss].sum, parameter[]] / name[mps_loss_mult]]]
while constant[True] begin[:]
variable[batch] assign[=] call[name[numpy_batch_queue].get, parameter[]]
if compare[name[batch] is constant[None]] begin[:]
break
if compare[call[name[batch]][constant[iteration]] in name[steps]] begin[:]
variable[ii] assign[=] binary_operation[call[name[steps].index, parameter[call[name[batch]][constant[iteration]]]] + constant[1]]
variable[new_lr] assign[=] binary_operation[call[name[factors]][name[ii]] * name[base_lr]]
call[name[mps_net].set_learning_rate, parameter[binary_operation[name[new_lr] / name[mps_loss_mult]]]]
call[name[batch_queue].append, parameter[call[name[mps_net].train, parameter[call[name[batch]][constant[input]], call[name[batch]][constant[label]]]]]]
if compare[call[name[len], parameter[name[batch_queue]]] greater[>] constant[1]] begin[:]
variable[cur_loss] assign[=] call[name[wait_for_batch], parameter[]]
if compare[call[name[batch]][constant[iteration]] greater[>] name[iteration]] begin[:]
call[name[update_progress], parameter[name[cur_loss], name[iteration]]]
variable[iteration] assign[=] call[name[batch]][constant[iteration]]
while compare[call[name[len], parameter[name[batch_queue]]] greater[>] constant[0]] begin[:]
variable[cur_loss] assign[=] call[name[wait_for_batch], parameter[]]
call[name[update_progress], parameter[name[cur_loss], name[iteration]]]
call[name[sframe_worker_thread].join, parameter[]]
call[name[numpy_worker_thread].join, parameter[]]
variable[mps_net_params] assign[=] call[name[mps_net].export, parameter[]]
variable[keys] assign[=] call[name[mps_net_params].keys, parameter[]]
for taget[name[k]] in starred[name[keys]] begin[:]
if compare[name[k] in name[net_params]] begin[:]
call[call[name[net_params]][name[k]].set_data, parameter[call[name[mps_net_params]][name[k]]]]
variable[training_time] assign[=] binary_operation[call[name[_time].time, parameter[]] - name[start_time]]
if name[verbose] begin[:]
call[name[print], parameter[name[hr]]]
variable[training_iterations] assign[=] binary_operation[name[iteration] + constant[1]]
variable[state] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f762c0>, <ast.Constant object at 0x7da1b1f75f60>, <ast.Constant object at 0x7da1b1f776d0>, <ast.Constant object at 0x7da1b1f741f0>, <ast.Constant object at 0x7da1b1f75b40>, <ast.Constant object at 0x7da1b1f76ad0>, <ast.Constant object at 0x7da1b1f76470>, <ast.Constant object at 0x7da1b1f75d50>, <ast.Constant object at 0x7da1b1f77dc0>, <ast.Constant object at 0x7da1b1f74a00>, <ast.Constant object at 0x7da1b1f779d0>, <ast.Constant object at 0x7da1b1f75de0>, <ast.Constant object at 0x7da1b1f77b80>, <ast.Constant object at 0x7da1b1f76380>, <ast.Constant object at 0x7da1b1f743a0>, <ast.Constant object at 0x7da1b1f75ff0>, <ast.Constant object at 0x7da1b1f76920>, <ast.Constant object at 0x7da1b1f75d80>, <ast.Constant object at 0x7da1b1f74d30>, <ast.Constant object at 0x7da1b1f74310>], [<ast.Name object at 0x7da1b1f74f70>, <ast.Name object at 0x7da1b1f76440>, <ast.Call object at 0x7da1b1f76a70>, <ast.Name object at 0x7da1b1f75750>, <ast.Name object at 0x7da1b1f75c60>, <ast.Name object at 0x7da1b1f753f0>, <ast.Name object at 0x7da1b1f76e00>, <ast.Name object at 0x7da1b1f74790>, <ast.Name object at 0x7da1b1f77070>, <ast.Name object at 0x7da1b1f74340>, <ast.Subscript object at 0x7da1b1f74730>, <ast.Name object at 0x7da1b1f75ed0>, <ast.Name object at 0x7da1b1f76d70>, <ast.Name object at 0x7da1b1f763b0>, <ast.Name object at 0x7da1b1f742b0>, <ast.Name object at 0x7da1b1f760e0>, <ast.BinOp object at 0x7da1b1f76b00>, <ast.Name object at 0x7da1b1f74f10>, <ast.Name object at 0x7da1b1f777c0>, <ast.Subscript object at 0x7da1b1f770a0>]]
return[call[name[ObjectDetector], parameter[name[state]]]] | keyword[def] identifier[create] ( identifier[dataset] , identifier[annotations] = keyword[None] , identifier[feature] = keyword[None] , identifier[model] = literal[string] ,
identifier[classes] = keyword[None] , identifier[batch_size] = literal[int] , identifier[max_iterations] = literal[int] , identifier[verbose] = keyword[True] ,
** identifier[kwargs] ):
literal[string]
identifier[_raise_error_if_not_sframe] ( identifier[dataset] , literal[string] )
keyword[from] . identifier[_mx_detector] keyword[import] identifier[YOLOLoss] keyword[as] identifier[_YOLOLoss]
keyword[from] . identifier[_model] keyword[import] identifier[tiny_darknet] keyword[as] identifier[_tiny_darknet]
keyword[from] . identifier[_sframe_loader] keyword[import] identifier[SFrameDetectionIter] keyword[as] identifier[_SFrameDetectionIter]
keyword[from] . identifier[_manual_scheduler] keyword[import] identifier[ManualScheduler] keyword[as] identifier[_ManualScheduler]
keyword[import] identifier[mxnet] keyword[as] identifier[_mx]
keyword[from] .. identifier[_mxnet] keyword[import] identifier[_mxnet_utils]
keyword[if] identifier[len] ( identifier[dataset] )== literal[int] :
keyword[raise] identifier[_ToolkitError] ( literal[string] )
identifier[_numeric_param_check_range] ( literal[string] , identifier[max_iterations] , literal[int] , identifier[_six] . identifier[MAXSIZE] )
identifier[start_time] = identifier[_time] . identifier[time] ()
identifier[supported_detectors] =[ literal[string] ]
keyword[if] identifier[feature] keyword[is] keyword[None] :
identifier[feature] = identifier[_tkutl] . identifier[_find_only_image_column] ( identifier[dataset] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] % identifier[feature] )
keyword[if] identifier[annotations] keyword[is] keyword[None] :
identifier[annotations] = identifier[_tkutl] . identifier[_find_only_column_of_type] ( identifier[dataset] ,
identifier[target_type] =[ identifier[list] , identifier[dict] ],
identifier[type_name] = literal[string] ,
identifier[col_name] = literal[string] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] % identifier[annotations] )
identifier[_raise_error_if_not_detection_sframe] ( identifier[dataset] , identifier[feature] , identifier[annotations] ,
identifier[require_annotations] = keyword[True] )
identifier[is_annotations_list] = identifier[dataset] [ identifier[annotations] ]. identifier[dtype] == identifier[list]
identifier[_tkutl] . identifier[_check_categorical_option_type] ( literal[string] , identifier[model] ,
identifier[supported_detectors] )
identifier[base_model] = identifier[model] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]
identifier[ref_model] = identifier[_pre_trained_models] . identifier[OBJECT_DETECTION_BASE_MODELS] [ identifier[base_model] ]()
identifier[params] ={
literal[string] :[
( literal[int] , literal[int] ),( literal[int] , literal[int] ),( literal[int] , literal[int] ),
( literal[int] , literal[int] ),( literal[int] , literal[int] ),( literal[int] , literal[int] ),
( literal[int] , literal[int] ),( literal[int] , literal[int] ),( literal[int] , literal[int] ),
( literal[int] , literal[int] ),( literal[int] , literal[int] ),( literal[int] , literal[int] ),
( literal[int] , literal[int] ),( literal[int] , literal[int] ),( literal[int] , literal[int] ),
],
literal[string] :[ literal[int] , literal[int] ],
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : keyword[True] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] :( literal[int] , literal[int] ),
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : keyword[True] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : keyword[True] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
}
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[new_keys] = identifier[set] ( identifier[kwargs] [ literal[string] ]. identifier[keys] ())
identifier[set_keys] = identifier[set] ( identifier[params] . identifier[keys] ())
identifier[unsupported] = identifier[new_keys] - identifier[set_keys]
keyword[if] identifier[unsupported] :
keyword[raise] identifier[_ToolkitError] ( literal[string] . identifier[format] ( identifier[unsupported] ))
identifier[params] . identifier[update] ( identifier[kwargs] [ literal[string] ])
identifier[anchors] = identifier[params] [ literal[string] ]
identifier[num_anchors] = identifier[len] ( identifier[anchors] )
keyword[if] identifier[batch_size] < literal[int] :
identifier[batch_size] = literal[int]
identifier[cuda_gpus] = identifier[_mxnet_utils] . identifier[get_gpus_in_use] ( identifier[max_devices] = identifier[batch_size] )
identifier[num_mxnet_gpus] = identifier[len] ( identifier[cuda_gpus] )
identifier[use_mps] = identifier[_use_mps] () keyword[and] identifier[num_mxnet_gpus] == literal[int]
identifier[batch_size_each] = identifier[batch_size] // identifier[max] ( identifier[num_mxnet_gpus] , literal[int] )
keyword[if] identifier[use_mps] keyword[and] identifier[_mps_device_memory_limit] ()< literal[int] * literal[int] * literal[int] * literal[int] :
identifier[batch_size_each] = literal[int]
identifier[batch_size] = identifier[max] ( identifier[num_mxnet_gpus] , literal[int] )* identifier[batch_size_each]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[batch_size] ))
identifier[io_thread_buffer_size] = identifier[params] [ literal[string] ] keyword[if] identifier[use_mps] keyword[else] literal[int]
keyword[if] identifier[verbose] :
identifier[cuda_mem_req] = literal[int] + identifier[batch_size_each] * literal[int]
identifier[_tkutl] . identifier[_print_neural_compute_device] ( identifier[cuda_gpus] = identifier[cuda_gpus] , identifier[use_mps] = identifier[use_mps] ,
identifier[cuda_mem_req] = identifier[cuda_mem_req] )
identifier[grid_shape] = identifier[params] [ literal[string] ]
identifier[input_image_shape] =( literal[int] ,
identifier[grid_shape] [ literal[int] ]* identifier[ref_model] . identifier[spatial_reduction] ,
identifier[grid_shape] [ literal[int] ]* identifier[ref_model] . identifier[spatial_reduction] )
keyword[try] :
keyword[if] identifier[is_annotations_list] :
identifier[instances] =( identifier[dataset] . identifier[stack] ( identifier[annotations] , identifier[new_column_name] = literal[string] , identifier[drop_na] = keyword[True] )
. identifier[unpack] ( literal[string] , identifier[limit] =[ literal[string] ]))
keyword[else] :
identifier[instances] = identifier[dataset] . identifier[rename] ({ identifier[annotations] : literal[string] }). identifier[dropna] ( literal[string] )
identifier[instances] = identifier[instances] . identifier[unpack] ( literal[string] , identifier[limit] =[ literal[string] ])
keyword[except] ( identifier[TypeError] , identifier[RuntimeError] ):
keyword[raise] identifier[_ToolkitError] ( literal[string]
literal[string] )
identifier[num_images] = identifier[len] ( identifier[dataset] )
identifier[num_instances] = identifier[len] ( identifier[instances] )
keyword[if] identifier[classes] keyword[is] keyword[None] :
identifier[classes] = identifier[instances] [ literal[string] ]. identifier[unique] ()
identifier[classes] = identifier[sorted] ( identifier[classes] )
identifier[class_to_index] ={ identifier[name] : identifier[index] keyword[for] identifier[index] , identifier[name] keyword[in] identifier[enumerate] ( identifier[classes] )}
identifier[num_classes] = identifier[len] ( identifier[classes] )
keyword[if] identifier[max_iterations] == literal[int] :
identifier[num_iterations_raw] = literal[int] * identifier[_np] . identifier[sqrt] ( identifier[num_instances] )/ identifier[batch_size]
identifier[num_iterations] = literal[int] * identifier[max] ( literal[int] , identifier[int] ( identifier[round] ( identifier[num_iterations_raw] / literal[int] )))
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[num_iterations] ))
keyword[else] :
identifier[num_iterations] = identifier[max_iterations]
identifier[loader] = identifier[_SFrameDetectionIter] ( identifier[dataset] ,
identifier[batch_size] = identifier[batch_size] ,
identifier[input_shape] = identifier[input_image_shape] [ literal[int] :],
identifier[output_shape] = identifier[grid_shape] ,
identifier[anchors] = identifier[anchors] ,
identifier[class_to_index] = identifier[class_to_index] ,
identifier[aug_params] = identifier[params] ,
identifier[shuffle] = identifier[params] [ literal[string] ],
identifier[loader_type] = literal[string] ,
identifier[feature_column] = identifier[feature] ,
identifier[annotations_column] = identifier[annotations] ,
identifier[io_thread_buffer_size] = identifier[io_thread_buffer_size] ,
identifier[iterations] = identifier[num_iterations] )
identifier[preds_per_box] = literal[int] + identifier[num_classes]
identifier[output_size] = identifier[preds_per_box] * identifier[num_anchors]
identifier[ymap_shape] =( identifier[batch_size_each] ,)+ identifier[tuple] ( identifier[grid_shape] )+( identifier[num_anchors] , identifier[preds_per_box] )
identifier[net] = identifier[_tiny_darknet] ( identifier[output_size] = identifier[output_size] )
identifier[loss] = identifier[_YOLOLoss] ( identifier[input_shape] = identifier[input_image_shape] [ literal[int] :],
identifier[output_shape] = identifier[grid_shape] ,
identifier[batch_size] = identifier[batch_size_each] ,
identifier[num_classes] = identifier[num_classes] ,
identifier[anchors] = identifier[anchors] ,
identifier[parameters] = identifier[params] )
identifier[base_lr] = identifier[params] [ literal[string] ]
identifier[steps] =[ identifier[num_iterations] // literal[int] , literal[int] * identifier[num_iterations] // literal[int] , identifier[num_iterations] ]
identifier[steps_and_factors] =[( identifier[step] , literal[int] **(- identifier[i] )) keyword[for] identifier[i] , identifier[step] keyword[in] identifier[enumerate] ( identifier[steps] )]
identifier[steps] , identifier[factors] = identifier[zip] (* identifier[steps_and_factors] )
identifier[lr_scheduler] = identifier[_ManualScheduler] ( identifier[step] = identifier[steps] , identifier[factor] = identifier[factors] )
identifier[ctx] = identifier[_mxnet_utils] . identifier[get_mxnet_context] ( identifier[max_devices] = identifier[batch_size] )
identifier[net_params] = identifier[net] . identifier[collect_params] ()
identifier[net_params] . identifier[initialize] ( identifier[_mx] . identifier[init] . identifier[Xavier] (), identifier[ctx] = identifier[ctx] )
identifier[net_params] [ literal[string] ]. identifier[initialize] ( identifier[_mx] . identifier[init] . identifier[Xavier] ( identifier[factor_type] = literal[string] ), identifier[ctx] = identifier[ctx] , identifier[force_reinit] = keyword[True] )
identifier[net_params] [ literal[string] ]. identifier[initialize] ( identifier[_mx] . identifier[init] . identifier[Uniform] ( literal[int] ), identifier[ctx] = identifier[ctx] , identifier[force_reinit] = keyword[True] )
identifier[bias] = identifier[_np] . identifier[zeros] ( identifier[output_size] , identifier[dtype] = identifier[_np] . identifier[float32] )
identifier[bias] [ literal[int] :: identifier[preds_per_box] ]-= literal[int]
keyword[from] . identifier[_mx_detector] keyword[import] identifier[ConstantArray]
identifier[net_params] [ literal[string] ]. identifier[initialize] ( identifier[ConstantArray] ( identifier[bias] ), identifier[ctx] , identifier[force_reinit] = keyword[True] )
identifier[ref_model] . identifier[available_parameters_subset] ( identifier[net_params] ). identifier[load] ( identifier[ref_model] . identifier[model_path] , identifier[ctx] )
identifier[column_names] =[ literal[string] , literal[string] , literal[string] ]
identifier[num_columns] = identifier[len] ( identifier[column_names] )
identifier[column_width] = identifier[max] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[len] ( identifier[x] ), identifier[column_names] ))+ literal[int]
identifier[hr] = literal[string] + literal[string] . identifier[join] ([ literal[string] * identifier[column_width] ]* identifier[num_columns] )+ literal[string]
identifier[progress] ={ literal[string] : keyword[None] , literal[string] : literal[int] }
identifier[iteration] = literal[int]
keyword[def] identifier[update_progress] ( identifier[cur_loss] , identifier[iteration] ):
identifier[iteration_base1] = identifier[iteration] + literal[int]
keyword[if] identifier[progress] [ literal[string] ] keyword[is] keyword[None] :
identifier[progress] [ literal[string] ]= identifier[cur_loss]
keyword[else] :
identifier[progress] [ literal[string] ]= literal[int] * identifier[progress] [ literal[string] ]+ literal[int] * identifier[cur_loss]
identifier[cur_time] = identifier[_time] . identifier[time] ()
keyword[if] identifier[verbose] keyword[and] identifier[iteration] == literal[int] :
identifier[print] ( identifier[hr] )
identifier[print] (( literal[string] * identifier[num_columns] + literal[string] ). identifier[format] (* identifier[column_names] , identifier[width] = identifier[column_width] - literal[int] ))
identifier[print] ( identifier[hr] )
keyword[if] identifier[verbose] keyword[and] ( identifier[cur_time] > identifier[progress] [ literal[string] ]+ literal[int] keyword[or]
identifier[iteration_base1] == identifier[max_iterations] ):
identifier[elapsed_time] = identifier[cur_time] - identifier[start_time]
identifier[print] ( literal[string] . identifier[format] (
identifier[cur_iter] = identifier[iteration_base1] , identifier[loss] = identifier[progress] [ literal[string] ],
identifier[time] = identifier[elapsed_time] , identifier[width] = identifier[column_width] - literal[int] ))
identifier[progress] [ literal[string] ]= identifier[cur_time]
keyword[if] identifier[use_mps] :
identifier[net] . identifier[forward] ( identifier[_mx] . identifier[nd] . identifier[uniform] ( literal[int] , literal[int] ,( identifier[batch_size_each] ,)+ identifier[input_image_shape] ))
identifier[mps_net_params] ={}
identifier[keys] = identifier[list] ( identifier[net_params] )
keyword[for] identifier[k] keyword[in] identifier[keys] :
identifier[mps_net_params] [ identifier[k] ]= identifier[net_params] [ identifier[k] ]. identifier[data] (). identifier[asnumpy] ()
identifier[mps_loss_mult] = identifier[params] [ literal[string] ]
identifier[mps_config] ={
literal[string] : identifier[_MpsGraphMode] . identifier[Train] ,
literal[string] : keyword[True] ,
literal[string] : identifier[base_lr] / identifier[params] [ literal[string] ],
literal[string] : identifier[params] . identifier[get] ( literal[string] , literal[int] )* identifier[mps_loss_mult] ,
literal[string] : identifier[params] [ literal[string] ],
literal[string] : keyword[True] ,
literal[string] : keyword[True] ,
literal[string] : identifier[params] [ literal[string] ]* identifier[mps_loss_mult] ,
literal[string] : identifier[params] [ literal[string] ]* identifier[mps_loss_mult] ,
literal[string] : identifier[params] [ literal[string] ]* identifier[mps_loss_mult] ,
literal[string] : identifier[params] [ literal[string] ]* identifier[mps_loss_mult] ,
literal[string] : identifier[params] [ literal[string] ]* identifier[mps_loss_mult] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : identifier[params] [ literal[string] ],
}
identifier[mps_net] = identifier[_get_mps_od_net] ( identifier[input_image_shape] = identifier[input_image_shape] ,
identifier[batch_size] = identifier[batch_size] ,
identifier[output_size] = identifier[output_size] ,
identifier[anchors] = identifier[anchors] ,
identifier[config] = identifier[mps_config] ,
identifier[weights] = identifier[mps_net_params] )
identifier[mxnet_batch_queue] = identifier[_Queue] ( literal[int] )
identifier[numpy_batch_queue] = identifier[_Queue] ( literal[int] )
keyword[def] identifier[sframe_worker] ():
keyword[for] identifier[batch] keyword[in] identifier[loader] :
identifier[mxnet_batch_queue] . identifier[put] ( identifier[batch] )
identifier[mxnet_batch_queue] . identifier[put] ( keyword[None] )
keyword[def] identifier[numpy_worker] ():
keyword[while] keyword[True] :
identifier[batch] = identifier[mxnet_batch_queue] . identifier[get] ()
keyword[if] identifier[batch] keyword[is] keyword[None] :
keyword[break]
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[batch] . identifier[data] , identifier[batch] . identifier[label] ):
identifier[input_data] = identifier[_mxnet_to_mps] ( identifier[x] . identifier[asnumpy] ())
identifier[label_data] = identifier[y] . identifier[asnumpy] (). identifier[reshape] ( identifier[y] . identifier[shape] [:- literal[int] ]+(- literal[int] ,))
identifier[input_data] = identifier[input_data] . identifier[astype] ( identifier[_np] . identifier[float32] )
keyword[if] keyword[not] identifier[input_data] . identifier[flags] . identifier[c_contiguous] :
identifier[input_data] = identifier[input_data] . identifier[copy] ()
identifier[label_data] = identifier[label_data] . identifier[astype] ( identifier[_np] . identifier[float32] )
keyword[if] keyword[not] identifier[label_data] . identifier[flags] . identifier[c_contiguous] :
identifier[label_data] = identifier[label_data] . identifier[copy] ()
identifier[numpy_batch_queue] . identifier[put] ({ literal[string] : identifier[input_data] ,
literal[string] : identifier[label_data] ,
literal[string] : identifier[batch] . identifier[iteration] })
identifier[numpy_batch_queue] . identifier[put] ( keyword[None] )
identifier[sframe_worker_thread] = identifier[_Thread] ( identifier[target] = identifier[sframe_worker] )
identifier[sframe_worker_thread] . identifier[start] ()
identifier[numpy_worker_thread] = identifier[_Thread] ( identifier[target] = identifier[numpy_worker] )
identifier[numpy_worker_thread] . identifier[start] ()
identifier[batch_queue] =[]
keyword[def] identifier[wait_for_batch] ():
identifier[pending_loss] = identifier[batch_queue] . identifier[pop] ( literal[int] )
identifier[batch_loss] = identifier[pending_loss] . identifier[asnumpy] ()
keyword[return] identifier[batch_loss] . identifier[sum] ()/ identifier[mps_loss_mult]
keyword[while] keyword[True] :
identifier[batch] = identifier[numpy_batch_queue] . identifier[get] ()
keyword[if] identifier[batch] keyword[is] keyword[None] :
keyword[break]
keyword[if] identifier[batch] [ literal[string] ] keyword[in] identifier[steps] :
identifier[ii] = identifier[steps] . identifier[index] ( identifier[batch] [ literal[string] ])+ literal[int]
identifier[new_lr] = identifier[factors] [ identifier[ii] ]* identifier[base_lr]
identifier[mps_net] . identifier[set_learning_rate] ( identifier[new_lr] / identifier[mps_loss_mult] )
identifier[batch_queue] . identifier[append] ( identifier[mps_net] . identifier[train] ( identifier[batch] [ literal[string] ], identifier[batch] [ literal[string] ]))
keyword[if] identifier[len] ( identifier[batch_queue] )> literal[int] :
identifier[cur_loss] = identifier[wait_for_batch] ()
keyword[if] identifier[batch] [ literal[string] ]> identifier[iteration] :
identifier[update_progress] ( identifier[cur_loss] , identifier[iteration] )
identifier[iteration] = identifier[batch] [ literal[string] ]
keyword[while] identifier[len] ( identifier[batch_queue] )> literal[int] :
identifier[cur_loss] = identifier[wait_for_batch] ()
identifier[update_progress] ( identifier[cur_loss] , identifier[iteration] )
identifier[sframe_worker_thread] . identifier[join] ()
identifier[numpy_worker_thread] . identifier[join] ()
identifier[mps_net_params] = identifier[mps_net] . identifier[export] ()
identifier[keys] = identifier[mps_net_params] . identifier[keys] ()
keyword[for] identifier[k] keyword[in] identifier[keys] :
keyword[if] identifier[k] keyword[in] identifier[net_params] :
identifier[net_params] [ identifier[k] ]. identifier[set_data] ( identifier[mps_net_params] [ identifier[k] ])
keyword[else] :
identifier[net] . identifier[hybridize] ()
identifier[options] ={ literal[string] : identifier[base_lr] , literal[string] : identifier[lr_scheduler] ,
literal[string] : identifier[params] [ literal[string] ], literal[string] : identifier[params] [ literal[string] ], literal[string] : literal[int] }
identifier[clip_grad] = identifier[params] . identifier[get] ( literal[string] )
keyword[if] identifier[clip_grad] :
identifier[options] [ literal[string] ]= identifier[clip_grad]
identifier[trainer] = identifier[_mx] . identifier[gluon] . identifier[Trainer] ( identifier[net] . identifier[collect_params] (), literal[string] , identifier[options] )
keyword[for] identifier[batch] keyword[in] identifier[loader] :
identifier[data] = identifier[_mx] . identifier[gluon] . identifier[utils] . identifier[split_and_load] ( identifier[batch] . identifier[data] [ literal[int] ], identifier[ctx_list] = identifier[ctx] , identifier[batch_axis] = literal[int] )
identifier[label] = identifier[_mx] . identifier[gluon] . identifier[utils] . identifier[split_and_load] ( identifier[batch] . identifier[label] [ literal[int] ], identifier[ctx_list] = identifier[ctx] , identifier[batch_axis] = literal[int] )
identifier[Ls] =[]
identifier[Zs] =[]
keyword[with] identifier[_mx] . identifier[autograd] . identifier[record] ():
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[data] , identifier[label] ):
identifier[z] = identifier[net] ( identifier[x] )
identifier[z0] = identifier[_mx] . identifier[nd] . identifier[transpose] ( identifier[z] ,[ literal[int] , literal[int] , literal[int] , literal[int] ]). identifier[reshape] ( identifier[ymap_shape] )
identifier[L] = identifier[loss] ( identifier[z0] , identifier[y] )
identifier[Ls] . identifier[append] ( identifier[L] )
keyword[for] identifier[L] keyword[in] identifier[Ls] :
identifier[L] . identifier[backward] ()
identifier[trainer] . identifier[step] ( literal[int] )
identifier[cur_loss] = identifier[_np] . identifier[mean] ([ identifier[L] . identifier[asnumpy] ()[ literal[int] ] keyword[for] identifier[L] keyword[in] identifier[Ls] ])
identifier[update_progress] ( identifier[cur_loss] , identifier[batch] . identifier[iteration] )
identifier[iteration] = identifier[batch] . identifier[iteration]
identifier[training_time] = identifier[_time] . identifier[time] ()- identifier[start_time]
keyword[if] identifier[verbose] :
identifier[print] ( identifier[hr] )
identifier[training_iterations] = identifier[iteration] + literal[int]
identifier[state] ={
literal[string] : identifier[net] ,
literal[string] : identifier[class_to_index] ,
literal[string] : identifier[_seconds_as_string] ( identifier[training_time] ),
literal[string] : identifier[grid_shape] ,
literal[string] : identifier[anchors] ,
literal[string] : identifier[model] ,
literal[string] : identifier[classes] ,
literal[string] : identifier[batch_size] ,
literal[string] : identifier[input_image_shape] ,
literal[string] : identifier[feature] ,
literal[string] : identifier[params] [ literal[string] ],
literal[string] : identifier[annotations] ,
literal[string] : identifier[num_classes] ,
literal[string] : identifier[num_images] ,
literal[string] : identifier[num_instances] ,
literal[string] : identifier[training_time] ,
literal[string] : identifier[training_iterations] * identifier[batch_size] // identifier[num_images] ,
literal[string] : identifier[training_iterations] ,
literal[string] : identifier[max_iterations] ,
literal[string] : identifier[progress] [ literal[string] ],
}
keyword[return] identifier[ObjectDetector] ( identifier[state] ) | def create(dataset, annotations=None, feature=None, model='darknet-yolo', classes=None, batch_size=0, max_iterations=0, verbose=True, **kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
model : string, optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
classes : list, optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size : int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
The number of training iterations. If 0, then it will be determined
automatically based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
# Make predictions on the training set and add them as a column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, 'dataset')
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
from .._mxnet import _mxnet_utils
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset') # depends on [control=['if'], data=[]]
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['feature']]
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset, target_type=[list, dict], type_name='list', col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['annotations']]
_raise_error_if_not_detection_sframe(dataset, feature, annotations, require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model, supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
# The large 'io_thread_buffer_size' below (8 batches) is an attempt to
# mitigate the SFrame shuffle operation that can occur after each epoch.
params = {'anchors': [(1.0, 2.0), (1.0, 1.0), (2.0, 1.0), (2.0, 4.0), (2.0, 2.0), (4.0, 2.0), (4.0, 8.0), (4.0, 4.0), (8.0, 4.0), (8.0, 16.0), (8.0, 8.0), (16.0, 8.0), (16.0, 32.0), (16.0, 16.0), (32.0, 16.0)], 'grid_shape': [13, 13], 'aug_resize': 0, 'aug_rand_crop': 0.9, 'aug_rand_pad': 0.9, 'aug_rand_gray': 0.0, 'aug_aspect_ratio': 1.25, 'aug_hue': 0.05, 'aug_brightness': 0.05, 'aug_saturation': 0.05, 'aug_contrast': 0.05, 'aug_horizontal_flip': True, 'aug_min_object_covered': 0, 'aug_min_eject_coverage': 0.5, 'aug_area_range': (0.15, 2), 'aug_pca_noise': 0.0, 'aug_max_attempts': 20, 'aug_inter_method': 2, 'lmb_coord_xy': 10.0, 'lmb_coord_wh': 10.0, 'lmb_obj': 100.0, 'lmb_noobj': 5.0, 'lmb_class': 2.0, 'non_maximum_suppression_threshold': 0.45, 'rescore': True, 'clip_gradients': 0.025, 'weight_decay': 0.0005, 'sgd_momentum': 0.9, 'learning_rate': 0.001, 'shuffle': True, 'mps_loss_mult': 8, 'io_thread_buffer_size': 8}
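# In the dict above, 'aug_*' keys control data augmentation and 'lmb_*' keys
# weight the YOLO loss terms; the rest configure the loader and optimizer.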
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported)) # depends on [control=['if'], data=[]]
params.update(kwargs['_advanced_parameters']) # depends on [control=['if'], data=['kwargs']]
anchors = params['anchors']
num_anchors = len(anchors)
if batch_size < 1:
batch_size = 32 # Default if not user-specified # depends on [control=['if'], data=['batch_size']]
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=batch_size)
num_mxnet_gpus = len(cuda_gpus)
use_mps = _use_mps() and num_mxnet_gpus == 0
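# Use the MPS (Metal) backend only when MXNet reports no CUDA GPUs in use.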
batch_size_each = batch_size // max(num_mxnet_gpus, 1)
if use_mps and _mps_device_memory_limit() < 4 * 1024 * 1024 * 1024:
# Reduce batch size for GPUs with less than 4GB RAM
batch_size_each = 16 # depends on [control=['if'], data=[]]
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
if verbose:
print("Setting 'batch_size' to {}".format(batch_size)) # depends on [control=['if'], data=[]]
# The IO thread also handles MXNet-powered data augmentation. This seems
# to be problematic to run independently of a MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 550 + batch_size_each * 85
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=use_mps, cuda_mem_req=cuda_mem_req) # depends on [control=['if'], data=[]]
grid_shape = params['grid_shape']
input_image_shape = (3, grid_shape[0] * ref_model.spatial_reduction, grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = dataset.stack(annotations, new_column_name='_bbox', drop_na=True).unpack('_bbox', limit=['label']) # depends on [control=['if'], data=[]]
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label']) # depends on [control=['try'], data=[]]
except (TypeError, RuntimeError):
# If this fails, the annotation format is invalid at the coarsest level
raise _ToolkitError("Annotations format is invalid. Must be a list of dictionaries or single dictionary containing 'label' and 'coordinates'.") # depends on [control=['except'], data=[]]
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique() # depends on [control=['if'], data=['classes']]
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for (index, name) in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset, batch_size=batch_size, input_shape=input_image_shape[1:], output_shape=grid_shape, anchors=anchors, class_to_index=class_to_index, aug_params=params, shuffle=params['shuffle'], loader_type='augmented', feature_column=feature, annotations_column=annotations, io_thread_buffer_size=io_thread_buffer_size, iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:], output_shape=grid_shape, batch_size=batch_size_each, num_classes=num_classes, anchors=anchors, parameters=params)
base_lr = params['learning_rate']
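# Learning-rate schedule: hold base_lr for the first half of training, then
# decay by 10x at the 50% mark and again at the 75% mark.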
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10 ** (-i)) for (i, step) in enumerate(steps)]
(steps, factors) = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(5e-05), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
iteration_base1 = iteration + 1
if progress['smoothed_loss'] is None:
progress['smoothed_loss'] = cur_loss # depends on [control=['if'], data=[]]
else:
progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
cur_time = _time.time()
# Printing of table header is deferred, so that start-of-training
# warnings appear above the table
if verbose and iteration == 0:
# Print progress table header
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width - 1))
print(hr) # depends on [control=['if'], data=[]]
if verbose and (cur_time > progress['last_time'] + 10 or iteration_base1 == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print('| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|'.format(cur_iter=iteration_base1, loss=progress['smoothed_loss'], time=elapsed_time, width=column_width - 1))
progress['last_time'] = cur_time # depends on [control=['if'], data=[]]
if use_mps:
# Force initialization of net_params
# TODO: Do not rely on MXNet to initialize MPS-based network
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = net_params[k].data().asnumpy() # depends on [control=['for'], data=['k']]
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
mps_loss_mult = params['mps_loss_mult']
mps_config = {'mode': _MpsGraphMode.Train, 'use_sgd': True, 'learning_rate': base_lr / params['mps_loss_mult'], 'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult, 'weight_decay': params['weight_decay'], 'od_include_network': True, 'od_include_loss': True, 'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult, 'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult, 'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult, 'od_scale_object': params['lmb_obj'] * mps_loss_mult, 'od_scale_class': params['lmb_class'] * mps_loss_mult, 'od_max_iou_for_no_object': 0.3, 'od_min_iou_for_object': 0.7, 'od_rescore': params['rescore']}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape, batch_size=batch_size, output_size=output_size, anchors=anchors, config=mps_config, weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
# Once a batch is loaded into NumPy, pass it immediately to the
# numpy_worker so that we can start I/O and decoding for the next
# batch.
for batch in loader:
mxnet_batch_queue.put(batch) # depends on [control=['for'], data=['batch']]
mxnet_batch_queue.put(None)
def numpy_worker():
while True:
batch = mxnet_batch_queue.get()
if batch is None:
break # depends on [control=['if'], data=[]]
for (x, y) in zip(batch.data, batch.label):
# Convert to NumPy arrays with required shapes. Note that
# asnumpy waits for any pending MXNet operations to finish.
input_data = _mxnet_to_mps(x.asnumpy())
label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
# Convert to packed 32-bit arrays.
input_data = input_data.astype(_np.float32)
if not input_data.flags.c_contiguous:
input_data = input_data.copy() # depends on [control=['if'], data=[]]
label_data = label_data.astype(_np.float32)
if not label_data.flags.c_contiguous:
label_data = label_data.copy() # depends on [control=['if'], data=[]]
# Push this batch to the main thread.
numpy_batch_queue.put({'input': input_data, 'label': label_data, 'iteration': batch.iteration}) # depends on [control=['for'], data=[]] # depends on [control=['while'], data=[]]
# Tell the main thread there's no more data.
numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batch_queue = []
def wait_for_batch():
pending_loss = batch_queue.pop(0)
batch_loss = pending_loss.asnumpy() # Waits for the batch to finish
return batch_loss.sum() / mps_loss_mult
while True:
batch = numpy_batch_queue.get()
if batch is None:
break # depends on [control=['if'], data=[]]
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult) # depends on [control=['if'], data=['steps']]
# Submit this batch to MPS.
batch_queue.append(mps_net.train(batch['input'], batch['label']))
# If we have two batches in flight, wait for the first one.
if len(batch_queue) > 1:
cur_loss = wait_for_batch()
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration) # depends on [control=['if'], data=['iteration']] # depends on [control=['if'], data=[]]
iteration = batch['iteration'] # depends on [control=['while'], data=[]]
# Wait for any pending batches and finalize our progress updates.
while len(batch_queue) > 0:
cur_loss = wait_for_batch() # depends on [control=['while'], data=[]]
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(mps_net_params[k]) # depends on [control=['if'], data=['k', 'net_params']] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]]
else: # Use MxNet
net.hybridize()
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler, 'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad # depends on [control=['if'], data=[]]
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for (x, y) in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L) # depends on [control=['for'], data=[]]
for L in Ls:
L.backward() # depends on [control=['for'], data=['L']] # depends on [control=['with'], data=[]]
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration # depends on [control=['for'], data=['batch']]
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer # depends on [control=['if'], data=[]]
# Package the trained model state
training_iterations = iteration + 1
state = {'_model': net, '_class_to_index': class_to_index, '_training_time_as_string': _seconds_as_string(training_time), '_grid_shape': grid_shape, 'anchors': anchors, 'model': model, 'classes': classes, 'batch_size': batch_size, 'input_image_shape': input_image_shape, 'feature': feature, 'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'], 'annotations': annotations, 'num_classes': num_classes, 'num_examples': num_images, 'num_bounding_boxes': num_instances, 'training_time': training_time, 'training_epochs': training_iterations * batch_size // num_images, 'training_iterations': training_iterations, 'max_iterations': max_iterations, 'training_loss': progress['smoothed_loss']}
return ObjectDetector(state) |
def rollbackBlockUser(self, userId, chatroomId):
"""
移除封禁聊天室成员方法 方法
@param userId:用户 Id。(必传)
@param chatroomId:聊天室 Id。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。
"""
desc = {
"name": "CodeSuccessReslut",
"desc": " http 成功返回结果",
"fields": [{
"name": "code",
"type": "Integer",
"desc": "返回码,200 为正常。"
}, {
"name": "errorMessage",
"type": "String",
"desc": "错误信息。"
}]
}
r = self.call_api(
method=('API', 'POST', 'application/x-www-form-urlencoded'),
action='/chatroom/user/block/rollback.json',
params={"userId": userId,
"chatroomId": chatroomId})
return Response(r, desc) | def function[rollbackBlockUser, parameter[self, userId, chatroomId]]:
constant[
移除封禁聊天室成员方法 方法
@param userId:用户 Id。(必传)
@param chatroomId:聊天室 Id。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。
]
variable[desc] assign[=] dictionary[[<ast.Constant object at 0x7da2041d8220>, <ast.Constant object at 0x7da2041db310>, <ast.Constant object at 0x7da2041d9a80>], [<ast.Constant object at 0x7da2041dbdf0>, <ast.Constant object at 0x7da2041db1c0>, <ast.List object at 0x7da2041d99c0>]]
variable[r] assign[=] call[name[self].call_api, parameter[]]
return[call[name[Response], parameter[name[r], name[desc]]]] | keyword[def] identifier[rollbackBlockUser] ( identifier[self] , identifier[userId] , identifier[chatroomId] ):
literal[string]
identifier[desc] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :[{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
},{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}]
}
identifier[r] = identifier[self] . identifier[call_api] (
identifier[method] =( literal[string] , literal[string] , literal[string] ),
identifier[action] = literal[string] ,
identifier[params] ={ literal[string] : identifier[userId] ,
literal[string] : identifier[chatroomId] })
keyword[return] identifier[Response] ( identifier[r] , identifier[desc] ) | def rollbackBlockUser(self, userId, chatroomId):
"""
移除封禁聊天室成员方法 方法
@param userId:用户 Id。(必传)
@param chatroomId:聊天室 Id。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。
"""
desc = {'name': 'CodeSuccessReslut', 'desc': 'HTTP success result', 'fields': [{'name': 'code', 'type': 'Integer', 'desc': 'Return code; 200 indicates success.'}, {'name': 'errorMessage', 'type': 'String', 'desc': 'Error message.'}]}
r = self.call_api(method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/chatroom/user/block/rollback.json', params={'userId': userId, 'chatroomId': chatroomId})
return Response(r, desc) |
def get_queryset(self):
"""
Returns a VersionedQuerySet capable of handling version time
restrictions.
:return: VersionedQuerySet
"""
qs = VersionedQuerySet(self.model, using=self._db)
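# Propagate the query time from the owning instance (when present) so that
# related-object lookups are evaluated as of the same point in time.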
if hasattr(self, 'instance') and hasattr(self.instance, '_querytime'):
qs.querytime = self.instance._querytime
return qs | def function[get_queryset, parameter[self]]:
constant[
Returns a VersionedQuerySet capable of handling version time
restrictions.
:return: VersionedQuerySet
]
variable[qs] assign[=] call[name[VersionedQuerySet], parameter[name[self].model]]
if <ast.BoolOp object at 0x7da1b1039600> begin[:]
name[qs].querytime assign[=] name[self].instance._querytime
return[name[qs]] | keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
identifier[qs] = identifier[VersionedQuerySet] ( identifier[self] . identifier[model] , identifier[using] = identifier[self] . identifier[_db] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[self] . identifier[instance] , literal[string] ):
identifier[qs] . identifier[querytime] = identifier[self] . identifier[instance] . identifier[_querytime]
keyword[return] identifier[qs] | def get_queryset(self):
"""
Returns a VersionedQuerySet capable of handling version time
restrictions.
:return: VersionedQuerySet
"""
qs = VersionedQuerySet(self.model, using=self._db)
if hasattr(self, 'instance') and hasattr(self.instance, '_querytime'):
qs.querytime = self.instance._querytime # depends on [control=['if'], data=[]]
return qs |
def course(self, dept, course_number):
"""Return an object of semester-independent course info. All arguments
should be strings.
>>> cis120 = r.course('cis', '120')
"""
response = self._request(path.join(ENDPOINTS['CATALOG'], dept, course_number))
return response['result_data'][0] | def function[course, parameter[self, dept, course_number]]:
constant[Return an object of semester-independent course info. All arguments
should be strings.
>>> cis120 = r.course('cis', '120')
]
variable[response] assign[=] call[name[self]._request, parameter[call[name[path].join, parameter[call[name[ENDPOINTS]][constant[CATALOG]], name[dept], name[course_number]]]]]
return[call[call[name[response]][constant[result_data]]][constant[0]]] | keyword[def] identifier[course] ( identifier[self] , identifier[dept] , identifier[course_number] ):
literal[string]
identifier[response] = identifier[self] . identifier[_request] ( identifier[path] . identifier[join] ( identifier[ENDPOINTS] [ literal[string] ], identifier[dept] , identifier[course_number] ))
keyword[return] identifier[response] [ literal[string] ][ literal[int] ] | def course(self, dept, course_number):
"""Return an object of semester-independent course info. All arguments
should be strings.
>>> cis120 = r.course('cis', '120')
"""
response = self._request(path.join(ENDPOINTS['CATALOG'], dept, course_number))
return response['result_data'][0] |
def concatenate(self, tpl, axis=None):
"""
Concatenates sparse tensors.
Parameters
----------
tpl : tuple of sparse tensors
Tensors to be concatenated.
axis : int, optional
Axis along which concatenation should take place
"""
if axis is None:
raise NotImplementedError(
'Sparse tensor concatenation without axis argument is not supported'
)
T = self
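# Fold left-to-right, concatenating one tensor at a time; tpl[0] is assumed
# to be this tensor (self), so iteration starts at index 1.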
for i in range(1, len(tpl)):
T = _single_concatenate(T, tpl[i], axis=axis)
return T | def function[concatenate, parameter[self, tpl, axis]]:
constant[
Concatenates sparse tensors.
Parameters
----------
tpl : tuple of sparse tensors
Tensors to be concatenated.
axis : int, optional
Axis along which concatenation should take place
]
if compare[name[axis] is constant[None]] begin[:]
<ast.Raise object at 0x7da2044c1c90>
variable[T] assign[=] name[self]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[tpl]]]]]] begin[:]
variable[T] assign[=] call[name[_single_concatenate], parameter[name[T], call[name[tpl]][name[i]]]]
return[name[T]] | keyword[def] identifier[concatenate] ( identifier[self] , identifier[tpl] , identifier[axis] = keyword[None] ):
literal[string]
keyword[if] identifier[axis] keyword[is] keyword[None] :
keyword[raise] identifier[NotImplementedError] (
literal[string]
)
identifier[T] = identifier[self]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[tpl] )):
identifier[T] = identifier[_single_concatenate] ( identifier[T] , identifier[tpl] [ identifier[i] ], identifier[axis] = identifier[axis] )
keyword[return] identifier[T] | def concatenate(self, tpl, axis=None):
"""
Concatenates sparse tensors.
Parameters
----------
tpl : tuple of sparse tensors
Tensors to be concatenated.
axis : int, optional
Axis along which concatenation should take place
"""
if axis is None:
raise NotImplementedError('Sparse tensor concatenation without axis argument is not supported') # depends on [control=['if'], data=[]]
T = self
for i in range(1, len(tpl)):
T = _single_concatenate(T, tpl[i], axis=axis) # depends on [control=['for'], data=['i']]
return T |
def list_py_file_paths(directory, safe_mode=True,
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:type safe_mode: bool
:param include_examples: whether to also include Airflow's bundled example
DAGs; defaults to the core ``LOAD_EXAMPLES`` configuration option
:type include_examples: bool or None
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
patterns += [re.compile(p) for p in f.read().split('\n') if p]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths | def function[list_py_file_paths, parameter[directory, safe_mode, include_examples]]:
constant[
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
]
if compare[name[include_examples] is constant[None]] begin[:]
variable[include_examples] assign[=] call[name[conf].getboolean, parameter[constant[core], constant[LOAD_EXAMPLES]]]
variable[file_paths] assign[=] list[[]]
if compare[name[directory] is constant[None]] begin[:]
return[list[[]]]
if name[include_examples] begin[:]
import module[airflow.example_dags]
variable[example_dag_folder] assign[=] call[name[airflow].example_dags.__path__][constant[0]]
call[name[file_paths].extend, parameter[call[name[list_py_file_paths], parameter[name[example_dag_folder], name[safe_mode], constant[False]]]]]
return[name[file_paths]] | keyword[def] identifier[list_py_file_paths] ( identifier[directory] , identifier[safe_mode] = keyword[True] ,
identifier[include_examples] = keyword[None] ):
literal[string]
keyword[if] identifier[include_examples] keyword[is] keyword[None] :
identifier[include_examples] = identifier[conf] . identifier[getboolean] ( literal[string] , literal[string] )
identifier[file_paths] =[]
keyword[if] identifier[directory] keyword[is] keyword[None] :
keyword[return] []
keyword[elif] identifier[os] . identifier[path] . identifier[isfile] ( identifier[directory] ):
keyword[return] [ identifier[directory] ]
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[directory] ):
identifier[patterns_by_dir] ={}
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[directory] , identifier[followlinks] = keyword[True] ):
identifier[patterns] = identifier[patterns_by_dir] . identifier[get] ( identifier[root] ,[])
identifier[ignore_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[ignore_file] ):
keyword[with] identifier[open] ( identifier[ignore_file] , literal[string] ) keyword[as] identifier[f] :
identifier[patterns] +=[ identifier[re] . identifier[compile] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[f] . identifier[read] (). identifier[split] ( literal[string] ) keyword[if] identifier[p] ]
identifier[dirs] [:]=[
identifier[d]
keyword[for] identifier[d] keyword[in] identifier[dirs]
keyword[if] keyword[not] identifier[any] ( identifier[p] . identifier[search] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[d] )) keyword[for] identifier[p] keyword[in] identifier[patterns] )
]
keyword[for] identifier[d] keyword[in] identifier[dirs] :
identifier[patterns_by_dir] [ identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[d] )]= identifier[patterns]
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[try] :
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_path] ):
keyword[continue]
identifier[mod_name] , identifier[file_ext] = identifier[os] . identifier[path] . identifier[splitext] (
identifier[os] . identifier[path] . identifier[split] ( identifier[file_path] )[- literal[int] ])
keyword[if] identifier[file_ext] != literal[string] keyword[and] keyword[not] identifier[zipfile] . identifier[is_zipfile] ( identifier[file_path] ):
keyword[continue]
keyword[if] identifier[any] ([ identifier[re] . identifier[findall] ( identifier[p] , identifier[file_path] ) keyword[for] identifier[p] keyword[in] identifier[patterns] ]):
keyword[continue]
identifier[might_contain_dag] = keyword[True]
keyword[if] identifier[safe_mode] keyword[and] keyword[not] identifier[zipfile] . identifier[is_zipfile] ( identifier[file_path] ):
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[fp] :
identifier[content] = identifier[fp] . identifier[read] ()
identifier[might_contain_dag] = identifier[all] (
[ identifier[s] keyword[in] identifier[content] keyword[for] identifier[s] keyword[in] ( literal[string] , literal[string] )])
keyword[if] keyword[not] identifier[might_contain_dag] :
keyword[continue]
identifier[file_paths] . identifier[append] ( identifier[file_path] )
keyword[except] identifier[Exception] :
identifier[log] = identifier[LoggingMixin] (). identifier[log]
identifier[log] . identifier[exception] ( literal[string] , identifier[f] )
keyword[if] identifier[include_examples] :
keyword[import] identifier[airflow] . identifier[example_dags]
identifier[example_dag_folder] = identifier[airflow] . identifier[example_dags] . identifier[__path__] [ literal[int] ]
identifier[file_paths] . identifier[extend] ( identifier[list_py_file_paths] ( identifier[example_dag_folder] , identifier[safe_mode] , keyword[False] ))
keyword[return] identifier[file_paths] | def list_py_file_paths(directory, safe_mode=True, include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES') # depends on [control=['if'], data=['include_examples']]
file_paths = []
if directory is None:
return [] # depends on [control=['if'], data=[]]
elif os.path.isfile(directory):
return [directory] # depends on [control=['if'], data=[]]
elif os.path.isdir(directory):
patterns_by_dir = {}
for (root, dirs, files) in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as f:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
patterns += [re.compile(p) for p in f.read().split('\n') if p] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [d for d in dirs if not any((p.search(os.path.join(root, d)) for p in patterns))]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns # depends on [control=['for'], data=['d']]
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue # depends on [control=['if'], data=[]]
(mod_name, file_ext) = os.path.splitext(os.path.split(file_path)[-1])
if file_ext != '.py' and (not zipfile.is_zipfile(file_path)):
continue # depends on [control=['if'], data=[]]
if any([re.findall(p, file_path) for p in patterns]):
continue # depends on [control=['if'], data=[]]
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and (not zipfile.is_zipfile(file_path)):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all([s in content for s in (b'DAG', b'airflow')]) # depends on [control=['with'], data=['fp']] # depends on [control=['if'], data=[]]
if not might_contain_dag:
continue # depends on [control=['if'], data=[]]
file_paths.append(file_path) # depends on [control=['try'], data=[]]
except Exception:
log = LoggingMixin().log
log.exception('Error while examining %s', f) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False)) # depends on [control=['if'], data=[]]
return file_paths |
def rasterize(self,
pitch,
origin,
resolution=None,
fill=True,
width=None,
**kwargs):
"""
Rasterize a Path2D object into a boolean image ("mode 1").
Parameters
------------
pitch: float, length in model space of a pixel edge
origin: (2,) float, origin position in model space
resolution: (2,) int, resolution in pixel space
fill: bool, if True will return closed regions as filled
width: int, if not None will draw outline this wide (pixels)
Returns
------------
raster: PIL.Image object, mode 1
"""
image = raster.rasterize(self,
pitch=pitch,
origin=origin,
resolution=resolution,
fill=fill,
width=width)
return image | def function[rasterize, parameter[self, pitch, origin, resolution, fill, width]]:
constant[
Rasterize a Path2D object into a boolean image ("mode 1").
Parameters
------------
pitch: float, length in model space of a pixel edge
origin: (2,) float, origin position in model space
resolution: (2,) int, resolution in pixel space
fill: bool, if True will return closed regions as filled
width: int, if not None will draw outline this wide (pixels)
Returns
------------
raster: PIL.Image object, mode 1
]
variable[image] assign[=] call[name[raster].rasterize, parameter[name[self]]]
return[name[image]] | keyword[def] identifier[rasterize] ( identifier[self] ,
identifier[pitch] ,
identifier[origin] ,
identifier[resolution] = keyword[None] ,
identifier[fill] = keyword[True] ,
identifier[width] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[image] = identifier[raster] . identifier[rasterize] ( identifier[self] ,
identifier[pitch] = identifier[pitch] ,
identifier[origin] = identifier[origin] ,
identifier[resolution] = identifier[resolution] ,
identifier[fill] = identifier[fill] ,
identifier[width] = identifier[width] )
keyword[return] identifier[image] | def rasterize(self, pitch, origin, resolution=None, fill=True, width=None, **kwargs):
"""
Rasterize a Path2D object into a boolean image ("mode 1").
Parameters
------------
pitch: float, length in model space of a pixel edge
origin: (2,) float, origin position in model space
resolution: (2,) int, resolution in pixel space
fill: bool, if True will return closed regions as filled
width: int, if not None will draw outline this wide (pixels)
Returns
------------
raster: PIL.Image object, mode 1
"""
image = raster.rasterize(self, pitch=pitch, origin=origin, resolution=resolution, fill=fill, width=width)
return image |
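A short usage sketch for the method above, assuming trimesh's Path2D API; the file name and pitch value are placeholders:

import trimesh

path = trimesh.load_path('outline.dxf')  # placeholder planar drawing
# one pixel per 0.1 model units, anchored at the path's lower-left bound
img = path.rasterize(pitch=0.1, origin=path.bounds[0])
img.show()  # PIL.Image, mode 1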
def _get_log_schema(self):
"""
Get the log schema for this SMC version.
:return: dict
"""
if self.session and self.session_id:
schema = '{}/{}/monitoring/log/schemas'.format(self.url, self.api_version)
response = self.session.get(
url=schema,
headers={'cookie': self.session_id,
'content-type': 'application/json'})
if response.status_code in (200, 201):
return response.json() | def function[_get_log_schema, parameter[self]]:
constant[
Get the log schema for this SMC version.
:return: dict
]
if <ast.BoolOp object at 0x7da1b1a29ab0> begin[:]
variable[schema] assign[=] call[constant[{}/{}/monitoring/log/schemas].format, parameter[name[self].url, name[self].api_version]]
variable[response] assign[=] call[name[self].session.get, parameter[]]
if compare[name[response].status_code in tuple[[<ast.Constant object at 0x7da1b1a2b730>, <ast.Constant object at 0x7da1b1a294e0>]]] begin[:]
return[call[name[response].json, parameter[]]] | keyword[def] identifier[_get_log_schema] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[session] keyword[and] identifier[self] . identifier[session_id] :
identifier[schema] = literal[string] . identifier[format] ( identifier[self] . identifier[url] , identifier[self] . identifier[api_version] )
identifier[response] = identifier[self] . identifier[session] . identifier[get] (
identifier[url] = identifier[schema] ,
identifier[headers] ={ literal[string] : identifier[self] . identifier[session_id] ,
literal[string] : literal[string] })
keyword[if] identifier[response] . identifier[status_code] keyword[in] ( literal[int] , literal[int] ):
keyword[return] identifier[response] . identifier[json] () | def _get_log_schema(self):
"""
Get the log schema for this SMC version.
:return: dict
"""
if self.session and self.session_id:
schema = '{}/{}/monitoring/log/schemas'.format(self.url, self.api_version)
response = self.session.get(url=schema, headers={'cookie': self.session_id, 'content-type': 'application/json'})
if response.status_code in (200, 201):
return response.json() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
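The method above boils down to one authenticated GET against the SMC monitoring API; a standalone requests sketch with placeholder host, version and session cookie:

import requests

url = 'https://smc.example.com:8082'  # placeholder SMC address
api_version = '6.5'                   # placeholder API version
session_id = 'JSESSIONID=abc123'      # placeholder login cookie

schema_url = '{}/{}/monitoring/log/schemas'.format(url, api_version)
response = requests.get(schema_url,
                        headers={'cookie': session_id,
                                 'content-type': 'application/json'})
if response.status_code in (200, 201):
    print(response.json())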
def delete_attributes(self, item_name, attributes=None,
expected_values=None):
"""
Delete attributes from a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
:type attributes: dict, list or :class:`boto.sdb.item.Item`
:param attributes: Either a list containing attribute names which will cause
all values associated with that attribute name to be deleted or
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
        :type expected_values: list
        :param expected_values: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
"""
return self.connection.delete_attributes(self, item_name, attributes,
expected_values) | def function[delete_attributes, parameter[self, item_name, attributes, expected_values]]:
constant[
Delete attributes from a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
:type attributes: dict, list or :class:`boto.sdb.item.Item`
:param attributes: Either a list containing attribute names which will cause
all values associated with that attribute name to be deleted or
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
        :type expected_values: list
        :param expected_values: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
]
return[call[name[self].connection.delete_attributes, parameter[name[self], name[item_name], name[attributes], name[expected_values]]]] | keyword[def] identifier[delete_attributes] ( identifier[self] , identifier[item_name] , identifier[attributes] = keyword[None] ,
identifier[expected_values] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[connection] . identifier[delete_attributes] ( identifier[self] , identifier[item_name] , identifier[attributes] ,
identifier[expected_values] ) | def delete_attributes(self, item_name, attributes=None, expected_values=None):
"""
Delete attributes from a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
:type attributes: dict, list or :class:`boto.sdb.item.Item`
:param attributes: Either a list containing attribute names which will cause
all values associated with that attribute name to be deleted or
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
        :type expected_values: list
        :param expected_values: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
"""
return self.connection.delete_attributes(self, item_name, attributes, expected_values) |
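A hedged usage sketch against boto's SimpleDB API; the region, domain and item names are placeholders, and AWS credentials are assumed to be configured:

import boto.sdb

conn = boto.sdb.connect_to_region('us-east-1')  # placeholder region
domain = conn.get_domain('my_domain')           # placeholder domain
# delete the 'color' attribute, but only if it currently equals 'blue'
domain.delete_attributes('item1', ['color'],
                         expected_values=['color', 'blue'])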
def plot_chmap(cube, kidid, ax=None, **kwargs):
"""Plot an intensity map.
Args:
        cube (xarray.DataArray): Cube in which the spectrum information is included.
        kidid (int): ID of the KID (detector) channel to plot.
        ax (matplotlib.axes): Axis the figure is plotted on.
        kwargs (optional): Plot options passed to ax.pcolormesh().
"""
if ax is None:
ax = plt.gca()
index = np.where(cube.kidid == kidid)[0]
if len(index) == 0:
raise KeyError('Such a kidid does not exist.')
index = int(index)
im = ax.pcolormesh(cube.x, cube.y, cube[:, :, index].T, **kwargs)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('intensity map ch #{}'.format(kidid))
return im | def function[plot_chmap, parameter[cube, kidid, ax]]:
constant[Plot an intensity map.
Args:
        cube (xarray.DataArray): Cube in which the spectrum information is included.
        kidid (int): ID of the KID (detector) channel to plot.
        ax (matplotlib.axes): Axis the figure is plotted on.
        kwargs (optional): Plot options passed to ax.pcolormesh().
]
if compare[name[ax] is constant[None]] begin[:]
variable[ax] assign[=] call[name[plt].gca, parameter[]]
variable[index] assign[=] call[call[name[np].where, parameter[compare[name[cube].kidid equal[==] name[kidid]]]]][constant[0]]
if compare[call[name[len], parameter[name[index]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20e9b20b0>
variable[index] assign[=] call[name[int], parameter[name[index]]]
variable[im] assign[=] call[name[ax].pcolormesh, parameter[name[cube].x, name[cube].y, call[name[cube]][tuple[[<ast.Slice object at 0x7da20e9b1e70>, <ast.Slice object at 0x7da20e9b1c60>, <ast.Name object at 0x7da20e9b0100>]]].T]]
call[name[ax].set_xlabel, parameter[constant[x]]]
call[name[ax].set_ylabel, parameter[constant[y]]]
call[name[ax].set_title, parameter[call[constant[intensity map ch #{}].format, parameter[name[kidid]]]]]
return[name[im]] | keyword[def] identifier[plot_chmap] ( identifier[cube] , identifier[kidid] , identifier[ax] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[ax] keyword[is] keyword[None] :
identifier[ax] = identifier[plt] . identifier[gca] ()
identifier[index] = identifier[np] . identifier[where] ( identifier[cube] . identifier[kidid] == identifier[kidid] )[ literal[int] ]
keyword[if] identifier[len] ( identifier[index] )== literal[int] :
keyword[raise] identifier[KeyError] ( literal[string] )
identifier[index] = identifier[int] ( identifier[index] )
identifier[im] = identifier[ax] . identifier[pcolormesh] ( identifier[cube] . identifier[x] , identifier[cube] . identifier[y] , identifier[cube] [:,:, identifier[index] ]. identifier[T] ,** identifier[kwargs] )
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_title] ( literal[string] . identifier[format] ( identifier[kidid] ))
keyword[return] identifier[im] | def plot_chmap(cube, kidid, ax=None, **kwargs):
"""Plot an intensity map.
Args:
        cube (xarray.DataArray): Cube in which the spectrum information is included.
        kidid (int): ID of the KID (detector) channel to plot.
        ax (matplotlib.axes): Axis the figure is plotted on.
        kwargs (optional): Plot options passed to ax.pcolormesh().
"""
if ax is None:
ax = plt.gca() # depends on [control=['if'], data=['ax']]
index = np.where(cube.kidid == kidid)[0]
if len(index) == 0:
raise KeyError('Such a kidid does not exist.') # depends on [control=['if'], data=[]]
index = int(index)
im = ax.pcolormesh(cube.x, cube.y, cube[:, :, index].T, **kwargs)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('intensity map ch #{}'.format(kidid))
return im |
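A synthetic-data sketch; a real cube would come from the decode pipeline, so the coordinates below are fabricated only to satisfy plot_chmap's interface (x, y and a kidid coordinate along the channel axis):

import numpy as np
import xarray as xr
import matplotlib.pyplot as plt

data = np.random.rand(10, 10, 3)  # fake 10x10 map for 3 detectors
cube = xr.DataArray(data, dims=('x', 'y', 'ch'),
                    coords={'x': np.arange(10), 'y': np.arange(10),
                            'kidid': ('ch', np.arange(3))})
plot_chmap(cube, kidid=1)
plt.show()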
def getLearnableLocationRepresentation(self):
"""
Get the cells in the location layer that should be associated with the
sensory input layer representation. In some models, this is identical to the
active cells. In others, it's a subset.
"""
learnableCells = np.array([], dtype="uint32")
totalPrevCells = 0
for module in self.L6aModules:
learnableCells = np.append(learnableCells,
module.getLearnableCells() + totalPrevCells)
totalPrevCells += module.numberOfCells()
return learnableCells | def function[getLearnableLocationRepresentation, parameter[self]]:
constant[
Get the cells in the location layer that should be associated with the
sensory input layer representation. In some models, this is identical to the
active cells. In others, it's a subset.
]
variable[learnableCells] assign[=] call[name[np].array, parameter[list[[]]]]
variable[totalPrevCells] assign[=] constant[0]
for taget[name[module]] in starred[name[self].L6aModules] begin[:]
variable[learnableCells] assign[=] call[name[np].append, parameter[name[learnableCells], binary_operation[call[name[module].getLearnableCells, parameter[]] + name[totalPrevCells]]]]
<ast.AugAssign object at 0x7da1b08b2830>
return[name[learnableCells]] | keyword[def] identifier[getLearnableLocationRepresentation] ( identifier[self] ):
literal[string]
identifier[learnableCells] = identifier[np] . identifier[array] ([], identifier[dtype] = literal[string] )
identifier[totalPrevCells] = literal[int]
keyword[for] identifier[module] keyword[in] identifier[self] . identifier[L6aModules] :
identifier[learnableCells] = identifier[np] . identifier[append] ( identifier[learnableCells] ,
identifier[module] . identifier[getLearnableCells] ()+ identifier[totalPrevCells] )
identifier[totalPrevCells] += identifier[module] . identifier[numberOfCells] ()
keyword[return] identifier[learnableCells] | def getLearnableLocationRepresentation(self):
"""
Get the cells in the location layer that should be associated with the
sensory input layer representation. In some models, this is identical to the
active cells. In others, it's a subset.
"""
learnableCells = np.array([], dtype='uint32')
totalPrevCells = 0
for module in self.L6aModules:
learnableCells = np.append(learnableCells, module.getLearnableCells() + totalPrevCells)
totalPrevCells += module.numberOfCells() # depends on [control=['for'], data=['module']]
return learnableCells |
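The method above concatenates each module's learnable cells into one flat index space, offsetting by the cell counts of all previous modules; a toy illustration with hypothetical stub modules:

import numpy as np

class StubModule(object):
    # hypothetical stand-in for an L6a location module
    def __init__(self, numCells, learnable):
        self._numCells = numCells
        self._learnable = np.asarray(learnable, dtype="uint32")

    def numberOfCells(self):
        return self._numCells

    def getLearnableCells(self):
        return self._learnable

modules = [StubModule(100, [3, 7]), StubModule(100, [0, 50])]
cells = np.array([], dtype="uint32")
offset = 0
for module in modules:
    cells = np.append(cells, module.getLearnableCells() + offset)
    offset += module.numberOfCells()
print(cells)  # [  3   7 100 150]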
def _round_whole_even(i):
r'''Round a number to the nearest whole number. If the number is exactly
between two numbers, round to the even whole number. Used by
`viscosity_index`.
Parameters
----------
i : float
Number, [-]
Returns
-------
i : int
Rounded number, [-]
Notes
-----
    In practice the tie branch should almost never trigger, since
    floating-point results are rarely exactly halfway between two whole numbers.
    Examples
    --------
    >>> _round_whole_even(116.5)
    116
    '''
    if i % 1 == 0.5:
if (i + 0.5) % 2 == 0:
i = i + 0.5
else:
i = i - 0.5
else:
i = round(i, 0)
return int(i) | def function[_round_whole_even, parameter[i]]:
constant[Round a number to the nearest whole number. If the number is exactly
between two numbers, round to the even whole number. Used by
`viscosity_index`.
Parameters
----------
i : float
Number, [-]
Returns
-------
i : int
Rounded number, [-]
Notes
-----
    In practice the tie branch should almost never trigger, since
    floating-point results are rarely exactly halfway between two whole numbers.
    Examples
    --------
    >>> _round_whole_even(116.5)
    116
    ]
    if compare[binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> constant[1]] equal[==] constant[0.5]] begin[:]
if compare[binary_operation[binary_operation[name[i] + constant[0.5]] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:]
variable[i] assign[=] binary_operation[name[i] + constant[0.5]]
return[call[name[int], parameter[name[i]]]] | keyword[def] identifier[_round_whole_even] ( identifier[i] ):
literal[string]
keyword[if] identifier[i] % literal[int] == literal[int] :
keyword[if] ( identifier[i] + literal[int] )% literal[int] == literal[int] :
identifier[i] = identifier[i] + literal[int]
keyword[else] :
identifier[i] = identifier[i] - literal[int]
keyword[else] :
identifier[i] = identifier[round] ( identifier[i] , literal[int] )
keyword[return] identifier[int] ( identifier[i] ) | def _round_whole_even(i):
"""Round a number to the nearest whole number. If the number is exactly
between two numbers, round to the even whole number. Used by
`viscosity_index`.
Parameters
----------
i : float
Number, [-]
Returns
-------
i : int
Rounded number, [-]
Notes
-----
    In practice the tie branch should almost never trigger, since
    floating-point results are rarely exactly halfway between two whole numbers.
    Examples
    --------
    >>> _round_whole_even(116.5)
    116
    """
    if i % 1 == 0.5:
if (i + 0.5) % 2 == 0:
i = i + 0.5 # depends on [control=['if'], data=[]]
else:
i = i - 0.5 # depends on [control=['if'], data=[]]
else:
i = round(i, 0)
return int(i) |
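A few deterministic spot checks of the half-to-even behaviour:

for v in (116.5, 117.5, 116.2, 116.0):
    print(v, '->', _round_whole_even(v))
# 116.5 -> 116   (tie: round to the even neighbour)
# 117.5 -> 118
# 116.2 -> 116   (ordinary rounding)
# 116.0 -> 116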
def concat(cartesians, ignore_index=False, keys=None):
"""Join list of cartesians into one molecule.
Wrapper around the :func:`pandas.concat` function.
Default values are the same as in the pandas function except for
``verify_integrity`` which is set to true in case of this library.
Args:
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
keys (sequence): If multiple levels passed, should contain tuples.
Construct hierarchical index using the passed keys as
the outermost level
Returns:
Cartesian:
"""
frames = [molecule._frame for molecule in cartesians]
if type(ignore_index) is bool:
new = pd.concat(frames, ignore_index=ignore_index, keys=keys,
verify_integrity=True)
else:
new = pd.concat(frames, ignore_index=True, keys=keys,
verify_integrity=True)
if type(ignore_index) is int:
new.index = range(ignore_index,
ignore_index + len(new))
else:
new.index = ignore_index
return cartesians[0].__class__(new) | def function[concat, parameter[cartesians, ignore_index, keys]]:
constant[Join list of cartesians into one molecule.
Wrapper around the :func:`pandas.concat` function.
Default values are the same as in the pandas function except for
``verify_integrity`` which is set to true in case of this library.
Args:
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
keys (sequence): If multiple levels passed, should contain tuples.
Construct hierarchical index using the passed keys as
the outermost level
Returns:
Cartesian:
]
variable[frames] assign[=] <ast.ListComp object at 0x7da1b28d5240>
if compare[call[name[type], parameter[name[ignore_index]]] is name[bool]] begin[:]
variable[new] assign[=] call[name[pd].concat, parameter[name[frames]]]
return[call[call[name[cartesians]][constant[0]].__class__, parameter[name[new]]]] | keyword[def] identifier[concat] ( identifier[cartesians] , identifier[ignore_index] = keyword[False] , identifier[keys] = keyword[None] ):
literal[string]
identifier[frames] =[ identifier[molecule] . identifier[_frame] keyword[for] identifier[molecule] keyword[in] identifier[cartesians] ]
keyword[if] identifier[type] ( identifier[ignore_index] ) keyword[is] identifier[bool] :
identifier[new] = identifier[pd] . identifier[concat] ( identifier[frames] , identifier[ignore_index] = identifier[ignore_index] , identifier[keys] = identifier[keys] ,
identifier[verify_integrity] = keyword[True] )
keyword[else] :
identifier[new] = identifier[pd] . identifier[concat] ( identifier[frames] , identifier[ignore_index] = keyword[True] , identifier[keys] = identifier[keys] ,
identifier[verify_integrity] = keyword[True] )
keyword[if] identifier[type] ( identifier[ignore_index] ) keyword[is] identifier[int] :
identifier[new] . identifier[index] = identifier[range] ( identifier[ignore_index] ,
identifier[ignore_index] + identifier[len] ( identifier[new] ))
keyword[else] :
identifier[new] . identifier[index] = identifier[ignore_index]
keyword[return] identifier[cartesians] [ literal[int] ]. identifier[__class__] ( identifier[new] ) | def concat(cartesians, ignore_index=False, keys=None):
"""Join list of cartesians into one molecule.
Wrapper around the :func:`pandas.concat` function.
Default values are the same as in the pandas function except for
``verify_integrity`` which is set to true in case of this library.
Args:
ignore_index (sequence, bool, int): If it is a boolean, it
behaves like in the description of
:meth:`pandas.DataFrame.append`.
If it is a sequence, it becomes the new index.
If it is an integer,
``range(ignore_index, ignore_index + len(new))``
becomes the new index.
keys (sequence): If multiple levels passed, should contain tuples.
Construct hierarchical index using the passed keys as
the outermost level
Returns:
Cartesian:
"""
frames = [molecule._frame for molecule in cartesians]
if type(ignore_index) is bool:
new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True) # depends on [control=['if'], data=[]]
else:
new = pd.concat(frames, ignore_index=True, keys=keys, verify_integrity=True)
if type(ignore_index) is int:
new.index = range(ignore_index, ignore_index + len(new)) # depends on [control=['if'], data=[]]
else:
new.index = ignore_index
return cartesians[0].__class__(new) |
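The same pandas pattern in isolation, with plain DataFrames standing in for the Cartesian frames, showing how the three ignore_index modes affect the result:

import pandas as pd

a = pd.DataFrame({'atom': ['C', 'H']})
b = pd.DataFrame({'atom': ['O', 'H']})
frames = [a, b]
# bool: let pandas renumber from zero
print(pd.concat(frames, ignore_index=True).index.tolist())  # [0, 1, 2, 3]
# int: renumber, then shift the start
new = pd.concat(frames, ignore_index=True)
new.index = range(10, 10 + len(new))
print(new.index.tolist())  # [10, 11, 12, 13]
# sequence: assign the new index directly
new.index = ['a', 'b', 'c', 'd']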
def boundary_maximum_linear(graph, xxx_todo_changeme1):
r"""
    Boundary term processing adjacent voxels' maximum value using a linear relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_linear`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image.
"""
(gradient_image, spacing) = xxx_todo_changeme1
gradient_image = scipy.asarray(gradient_image)
# compute maximum intensity to encounter
max_intensity = float(numpy.abs(gradient_image).max())
def boundary_term_linear(intensities):
"""
Implementation of a linear boundary term computation over an array.
"""
# normalize the intensity distances to the interval (0, 1]
intensities /= max_intensity
        #intensities[intensities > 1] = 1 # should not be required, but might be needed due to rounding errors
        intensities = (1. - intensities) # reverse weights so that a high intensity difference leads to a small weight, making a cut at this edge more likely
intensities[intensities == 0.] = sys.float_info.min # required to avoid zero values
return intensities
__skeleton_maximum(graph, gradient_image, boundary_term_linear, spacing) | def function[boundary_maximum_linear, parameter[graph, xxx_todo_changeme1]]:
constant[
    Boundary term processing adjacent voxels' maximum value using a linear relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_linear`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image.
]
<ast.Tuple object at 0x7da1b15166e0> assign[=] name[xxx_todo_changeme1]
variable[gradient_image] assign[=] call[name[scipy].asarray, parameter[name[gradient_image]]]
variable[max_intensity] assign[=] call[name[float], parameter[call[call[name[numpy].abs, parameter[name[gradient_image]]].max, parameter[]]]]
def function[boundary_term_linear, parameter[intensities]]:
constant[
Implementation of a linear boundary term computation over an array.
]
<ast.AugAssign object at 0x7da1b1516f80>
variable[intensities] assign[=] binary_operation[constant[1.0] - name[intensities]]
call[name[intensities]][compare[name[intensities] equal[==] constant[0.0]]] assign[=] name[sys].float_info.min
return[name[intensities]]
call[name[__skeleton_maximum], parameter[name[graph], name[gradient_image], name[boundary_term_linear], name[spacing]]] | keyword[def] identifier[boundary_maximum_linear] ( identifier[graph] , identifier[xxx_todo_changeme1] ):
literal[string]
( identifier[gradient_image] , identifier[spacing] )= identifier[xxx_todo_changeme1]
identifier[gradient_image] = identifier[scipy] . identifier[asarray] ( identifier[gradient_image] )
identifier[max_intensity] = identifier[float] ( identifier[numpy] . identifier[abs] ( identifier[gradient_image] ). identifier[max] ())
keyword[def] identifier[boundary_term_linear] ( identifier[intensities] ):
literal[string]
identifier[intensities] /= identifier[max_intensity]
identifier[intensities] =( literal[int] - identifier[intensities] )
identifier[intensities] [ identifier[intensities] == literal[int] ]= identifier[sys] . identifier[float_info] . identifier[min]
keyword[return] identifier[intensities]
identifier[__skeleton_maximum] ( identifier[graph] , identifier[gradient_image] , identifier[boundary_term_linear] , identifier[spacing] ) | def boundary_maximum_linear(graph, xxx_todo_changeme1):
"""
    Boundary term processing adjacent voxels' maximum value using a linear relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_linear`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image.
"""
(gradient_image, spacing) = xxx_todo_changeme1
gradient_image = scipy.asarray(gradient_image)
# compute maximum intensity to encounter
max_intensity = float(numpy.abs(gradient_image).max())
def boundary_term_linear(intensities):
"""
Implementation of a linear boundary term computation over an array.
"""
# normalize the intensity distances to the interval (0, 1]
intensities /= max_intensity
        #intensities[intensities > 1] = 1 # should not be required, but might be needed due to rounding errors
        intensities = 1.0 - intensities # reverse weights so that a high intensity difference leads to a small weight, making a cut at this edge more likely
intensities[intensities == 0.0] = sys.float_info.min # required to avoid zero values
return intensities
__skeleton_maximum(graph, gradient_image, boundary_term_linear, spacing) |
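The weight mapping on its own: a synthetic gradient is normalised to (0, 1], inverted so that strong edges get small weights, and exact zeros are clamped to the smallest positive float:

import sys
import numpy as np

grad = np.array([0., 2., 5., 10.])
weights = np.abs(grad) / np.abs(grad).max()  # [0.0, 0.2, 0.5, 1.0]
weights = 1. - weights                       # strong edges -> small weights
weights[weights == 0.] = sys.float_info.min  # avoid zero edge capacities
print(weights)  # ~[1.0, 0.8, 0.5, 2.2e-308]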
def get_user_config(config_file=None, default_config=False):
"""Return the user config as a dict.
If ``default_config`` is True, ignore ``config_file`` and return default
values for the config parameters.
If a path to a ``config_file`` is given, that is different from the default
location, load the user config from that.
Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
environment variable. If set, load the config from this path. This will
raise an error if the specified path is not valid.
If the environment variable is not set, try the default config file path
before falling back to the default config values.
"""
# Do NOT load a config. Return defaults instead.
if default_config:
return copy.copy(DEFAULT_CONFIG)
# Load the given config file
if config_file and config_file is not USER_CONFIG_PATH:
return get_config(config_file)
try:
# Does the user set up a config environment variable?
env_config_file = os.environ['COOKIECUTTER_CONFIG']
except KeyError:
# Load an optional user config if it exists
# otherwise return the defaults
if os.path.exists(USER_CONFIG_PATH):
return get_config(USER_CONFIG_PATH)
else:
return copy.copy(DEFAULT_CONFIG)
else:
# There is a config environment variable. Try to load it.
# Do not check for existence, so invalid file paths raise an error.
return get_config(env_config_file) | def function[get_user_config, parameter[config_file, default_config]]:
constant[Return the user config as a dict.
If ``default_config`` is True, ignore ``config_file`` and return default
values for the config parameters.
If a path to a ``config_file`` is given, that is different from the default
location, load the user config from that.
Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
environment variable. If set, load the config from this path. This will
raise an error if the specified path is not valid.
If the environment variable is not set, try the default config file path
before falling back to the default config values.
]
if name[default_config] begin[:]
return[call[name[copy].copy, parameter[name[DEFAULT_CONFIG]]]]
if <ast.BoolOp object at 0x7da1b212dde0> begin[:]
return[call[name[get_config], parameter[name[config_file]]]]
<ast.Try object at 0x7da1b212ebc0> | keyword[def] identifier[get_user_config] ( identifier[config_file] = keyword[None] , identifier[default_config] = keyword[False] ):
literal[string]
keyword[if] identifier[default_config] :
keyword[return] identifier[copy] . identifier[copy] ( identifier[DEFAULT_CONFIG] )
keyword[if] identifier[config_file] keyword[and] identifier[config_file] keyword[is] keyword[not] identifier[USER_CONFIG_PATH] :
keyword[return] identifier[get_config] ( identifier[config_file] )
keyword[try] :
identifier[env_config_file] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[USER_CONFIG_PATH] ):
keyword[return] identifier[get_config] ( identifier[USER_CONFIG_PATH] )
keyword[else] :
keyword[return] identifier[copy] . identifier[copy] ( identifier[DEFAULT_CONFIG] )
keyword[else] :
keyword[return] identifier[get_config] ( identifier[env_config_file] ) | def get_user_config(config_file=None, default_config=False):
"""Return the user config as a dict.
If ``default_config`` is True, ignore ``config_file`` and return default
values for the config parameters.
If a path to a ``config_file`` is given, that is different from the default
location, load the user config from that.
Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
environment variable. If set, load the config from this path. This will
raise an error if the specified path is not valid.
If the environment variable is not set, try the default config file path
before falling back to the default config values.
"""
# Do NOT load a config. Return defaults instead.
if default_config:
return copy.copy(DEFAULT_CONFIG) # depends on [control=['if'], data=[]]
# Load the given config file
if config_file and config_file is not USER_CONFIG_PATH:
return get_config(config_file) # depends on [control=['if'], data=[]]
try:
# Does the user set up a config environment variable?
env_config_file = os.environ['COOKIECUTTER_CONFIG'] # depends on [control=['try'], data=[]]
except KeyError:
# Load an optional user config if it exists
# otherwise return the defaults
if os.path.exists(USER_CONFIG_PATH):
return get_config(USER_CONFIG_PATH) # depends on [control=['if'], data=[]]
else:
return copy.copy(DEFAULT_CONFIG) # depends on [control=['except'], data=[]]
else:
# There is a config environment variable. Try to load it.
# Do not check for existence, so invalid file paths raise an error.
return get_config(env_config_file) |
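A quick sketch of the resolution order; the YAML paths are placeholders and must exist for the first two calls to succeed:

import os

# highest priority: an explicit path argument
cfg = get_user_config(config_file='/tmp/my-cookiecutter.yaml')
# next: the environment variable (an invalid path raises here)
os.environ['COOKIECUTTER_CONFIG'] = '/tmp/ci-config.yaml'
cfg = get_user_config()
# finally: skip all lookups and take the built-in defaults
cfg = get_user_config(default_config=True)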
def on_connect(self, client, userdata, flags, rc):
"""
        on_connect(self, client, userdata, flags, rc):
client
the client instance for this callback
userdata
the private user data as set in Client() or userdata_set()
flags
response flags sent by the broker
rc
the connection result
"""
_logger.debug("Connection established with result code %s" % rc)
if self.reg_thread is not None and self.reg_thread.is_alive():
_logger.debug("Joining previous reg_thread")
self.reg_thread.join()
def reg():
delay = None
if hasattr(self.reg_delay, '__call__'):
delay = self.reg_delay()
else:
delay = self.reg_delay
sleep(delay)
self._conn.set_tunnels(self._conn.tunnels)
model_profile = self.get_profile("model")
view_profile = self.get_profile("view")
self.deregister(model_profile)
self.deregister(view_profile)
self.register(model_profile)
self.register(view_profile)
self.is_ready.set()
self.reg_thread = Thread(target=reg)
self.reg_thread.daemon = True
self.reg_thread.start() | def function[on_connect, parameter[self, client, userdata, flags, rc]]:
constant[
        on_connect(self, client, userdata, flags, rc):
client
the client instance for this callback
userdata
the private user data as set in Client() or userdata_set()
flags
response flags sent by the broker
rc
the connection result
]
call[name[_logger].debug, parameter[binary_operation[constant[Connection established with result code %s] <ast.Mod object at 0x7da2590d6920> name[rc]]]]
if <ast.BoolOp object at 0x7da1b0b3a320> begin[:]
call[name[_logger].debug, parameter[constant[Joining previous reg_thread]]]
call[name[self].reg_thread.join, parameter[]]
def function[reg, parameter[]]:
variable[delay] assign[=] constant[None]
if call[name[hasattr], parameter[name[self].reg_delay, constant[__call__]]] begin[:]
variable[delay] assign[=] call[name[self].reg_delay, parameter[]]
call[name[sleep], parameter[name[delay]]]
call[name[self]._conn.set_tunnels, parameter[name[self]._conn.tunnels]]
variable[model_profile] assign[=] call[name[self].get_profile, parameter[constant[model]]]
variable[view_profile] assign[=] call[name[self].get_profile, parameter[constant[view]]]
call[name[self].deregister, parameter[name[model_profile]]]
call[name[self].deregister, parameter[name[view_profile]]]
call[name[self].register, parameter[name[model_profile]]]
call[name[self].register, parameter[name[view_profile]]]
call[name[self].is_ready.set, parameter[]]
name[self].reg_thread assign[=] call[name[Thread], parameter[]]
name[self].reg_thread.daemon assign[=] constant[True]
call[name[self].reg_thread.start, parameter[]] | keyword[def] identifier[on_connect] ( identifier[self] , identifier[client] , identifier[userdata] , identifier[flags] , identifier[rc] ):
literal[string]
identifier[_logger] . identifier[debug] ( literal[string] % identifier[rc] )
keyword[if] identifier[self] . identifier[reg_thread] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[reg_thread] . identifier[is_alive] ():
identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[reg_thread] . identifier[join] ()
keyword[def] identifier[reg] ():
identifier[delay] = keyword[None]
keyword[if] identifier[hasattr] ( identifier[self] . identifier[reg_delay] , literal[string] ):
identifier[delay] = identifier[self] . identifier[reg_delay] ()
keyword[else] :
identifier[delay] = identifier[self] . identifier[reg_delay]
identifier[sleep] ( identifier[delay] )
identifier[self] . identifier[_conn] . identifier[set_tunnels] ( identifier[self] . identifier[_conn] . identifier[tunnels] )
identifier[model_profile] = identifier[self] . identifier[get_profile] ( literal[string] )
identifier[view_profile] = identifier[self] . identifier[get_profile] ( literal[string] )
identifier[self] . identifier[deregister] ( identifier[model_profile] )
identifier[self] . identifier[deregister] ( identifier[view_profile] )
identifier[self] . identifier[register] ( identifier[model_profile] )
identifier[self] . identifier[register] ( identifier[view_profile] )
identifier[self] . identifier[is_ready] . identifier[set] ()
identifier[self] . identifier[reg_thread] = identifier[Thread] ( identifier[target] = identifier[reg] )
identifier[self] . identifier[reg_thread] . identifier[daemon] = keyword[True]
identifier[self] . identifier[reg_thread] . identifier[start] () | def on_connect(self, client, userdata, flags, rc):
"""
        on_connect(self, client, userdata, flags, rc):
client
the client instance for this callback
userdata
the private user data as set in Client() or userdata_set()
flags
response flags sent by the broker
rc
the connection result
"""
_logger.debug('Connection established with result code %s' % rc)
if self.reg_thread is not None and self.reg_thread.is_alive():
_logger.debug('Joining previous reg_thread')
self.reg_thread.join() # depends on [control=['if'], data=[]]
def reg():
delay = None
if hasattr(self.reg_delay, '__call__'):
delay = self.reg_delay() # depends on [control=['if'], data=[]]
else:
delay = self.reg_delay
sleep(delay)
self._conn.set_tunnels(self._conn.tunnels)
model_profile = self.get_profile('model')
view_profile = self.get_profile('view')
self.deregister(model_profile)
self.deregister(view_profile)
self.register(model_profile)
self.register(view_profile)
self.is_ready.set()
self.reg_thread = Thread(target=reg)
self.reg_thread.daemon = True
self.reg_thread.start() |
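For context, the hook-up that triggers a handler like the one above, using the classic paho-mqtt 1.x callback signature and a placeholder broker address; a plain-function variant is sketched here:

import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    print('connected with result code', rc)
    client.subscribe('some/topic')  # re-subscribe on every (re)connect

client = mqtt.Client()
client.on_connect = on_connect
client.connect('broker.example.com', 1883, 60)  # placeholder broker
client.loop_forever()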
def plot(self, label=None, colour='g', style='-'): # pragma: no cover
'''Plot the time series.'''
pylab = LazyImport.pylab()
pylab.plot(self.dates, self.values, '%s%s' % (colour, style), label=label)
if label is not None:
pylab.legend()
pylab.show() | def function[plot, parameter[self, label, colour, style]]:
constant[Plot the time series.]
variable[pylab] assign[=] call[name[LazyImport].pylab, parameter[]]
call[name[pylab].plot, parameter[name[self].dates, name[self].values, binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18c4cc220>, <ast.Name object at 0x7da18bc730d0>]]]]]
if compare[name[label] is_not constant[None]] begin[:]
call[name[pylab].legend, parameter[]]
call[name[pylab].show, parameter[]] | keyword[def] identifier[plot] ( identifier[self] , identifier[label] = keyword[None] , identifier[colour] = literal[string] , identifier[style] = literal[string] ):
literal[string]
identifier[pylab] = identifier[LazyImport] . identifier[pylab] ()
identifier[pylab] . identifier[plot] ( identifier[self] . identifier[dates] , identifier[self] . identifier[values] , literal[string] %( identifier[colour] , identifier[style] ), identifier[label] = identifier[label] )
keyword[if] identifier[label] keyword[is] keyword[not] keyword[None] :
identifier[pylab] . identifier[legend] ()
identifier[pylab] . identifier[show] () | def plot(self, label=None, colour='g', style='-'): # pragma: no cover
'Plot the time series.'
pylab = LazyImport.pylab()
pylab.plot(self.dates, self.values, '%s%s' % (colour, style), label=label)
if label is not None:
pylab.legend() # depends on [control=['if'], data=[]]
pylab.show() |
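Usage is a single call on a series object exposing this method; the label argument switches the legend on:

# `series` is assumed to be a time-series instance with the method above
series.plot(label='observed', colour='b', style='--')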
def _build_message(self, to, text, subject=None, mtype=None, unsubscribe_url=None):
"""Constructs a MIME message from message and dispatch models."""
        # TODO: maybe handle file attachments through the `files` message_model context var.
if subject is None:
subject = u'%s' % _('No Subject')
if mtype == 'html':
msg = self.mime_multipart()
text_part = self.mime_multipart('alternative')
text_part.attach(self.mime_text(strip_tags(text), _charset='utf-8'))
text_part.attach(self.mime_text(text, 'html', _charset='utf-8'))
msg.attach(text_part)
else:
msg = self.mime_text(text, _charset='utf-8')
msg['From'] = self.from_email
msg['To'] = to
msg['Subject'] = subject
if unsubscribe_url:
msg['List-Unsubscribe'] = '<%s>' % unsubscribe_url
return msg | def function[_build_message, parameter[self, to, text, subject, mtype, unsubscribe_url]]:
constant[Constructs a MIME message from message and dispatch models.]
if compare[name[subject] is constant[None]] begin[:]
variable[subject] assign[=] binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> call[name[_], parameter[constant[No Subject]]]]
if compare[name[mtype] equal[==] constant[html]] begin[:]
variable[msg] assign[=] call[name[self].mime_multipart, parameter[]]
variable[text_part] assign[=] call[name[self].mime_multipart, parameter[constant[alternative]]]
call[name[text_part].attach, parameter[call[name[self].mime_text, parameter[call[name[strip_tags], parameter[name[text]]]]]]]
call[name[text_part].attach, parameter[call[name[self].mime_text, parameter[name[text], constant[html]]]]]
call[name[msg].attach, parameter[name[text_part]]]
call[name[msg]][constant[From]] assign[=] name[self].from_email
call[name[msg]][constant[To]] assign[=] name[to]
call[name[msg]][constant[Subject]] assign[=] name[subject]
if name[unsubscribe_url] begin[:]
call[name[msg]][constant[List-Unsubscribe]] assign[=] binary_operation[constant[<%s>] <ast.Mod object at 0x7da2590d6920> name[unsubscribe_url]]
return[name[msg]] | keyword[def] identifier[_build_message] ( identifier[self] , identifier[to] , identifier[text] , identifier[subject] = keyword[None] , identifier[mtype] = keyword[None] , identifier[unsubscribe_url] = keyword[None] ):
literal[string]
keyword[if] identifier[subject] keyword[is] keyword[None] :
identifier[subject] = literal[string] % identifier[_] ( literal[string] )
keyword[if] identifier[mtype] == literal[string] :
identifier[msg] = identifier[self] . identifier[mime_multipart] ()
identifier[text_part] = identifier[self] . identifier[mime_multipart] ( literal[string] )
identifier[text_part] . identifier[attach] ( identifier[self] . identifier[mime_text] ( identifier[strip_tags] ( identifier[text] ), identifier[_charset] = literal[string] ))
identifier[text_part] . identifier[attach] ( identifier[self] . identifier[mime_text] ( identifier[text] , literal[string] , identifier[_charset] = literal[string] ))
identifier[msg] . identifier[attach] ( identifier[text_part] )
keyword[else] :
identifier[msg] = identifier[self] . identifier[mime_text] ( identifier[text] , identifier[_charset] = literal[string] )
identifier[msg] [ literal[string] ]= identifier[self] . identifier[from_email]
identifier[msg] [ literal[string] ]= identifier[to]
identifier[msg] [ literal[string] ]= identifier[subject]
keyword[if] identifier[unsubscribe_url] :
identifier[msg] [ literal[string] ]= literal[string] % identifier[unsubscribe_url]
keyword[return] identifier[msg] | def _build_message(self, to, text, subject=None, mtype=None, unsubscribe_url=None):
"""Constructs a MIME message from message and dispatch models."""
        # TODO: maybe handle file attachments through the `files` message_model context var.
if subject is None:
subject = u'%s' % _('No Subject') # depends on [control=['if'], data=['subject']]
if mtype == 'html':
msg = self.mime_multipart()
text_part = self.mime_multipart('alternative')
text_part.attach(self.mime_text(strip_tags(text), _charset='utf-8'))
text_part.attach(self.mime_text(text, 'html', _charset='utf-8'))
msg.attach(text_part) # depends on [control=['if'], data=[]]
else:
msg = self.mime_text(text, _charset='utf-8')
msg['From'] = self.from_email
msg['To'] = to
msg['Subject'] = subject
if unsubscribe_url:
msg['List-Unsubscribe'] = '<%s>' % unsubscribe_url # depends on [control=['if'], data=[]]
return msg |
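The multipart branch reproduced with the standard library so it runs standalone; the addresses are placeholders:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

html = '<p>Hello <b>world</b></p>'
msg = MIMEMultipart()
alt = MIMEMultipart('alternative')
alt.attach(MIMEText('Hello world', _charset='utf-8'))  # plain-text fallback
alt.attach(MIMEText(html, 'html', _charset='utf-8'))   # rich part
msg.attach(alt)
msg['From'] = 'noreply@example.com'
msg['To'] = 'user@example.com'
msg['Subject'] = 'No Subject'
print(msg.as_string())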
def PlanetStatistics(model='nPLD', compare_to='k2sff', **kwargs):
'''
Computes and plots the CDPP statistics comparison between `model` and
`compare_to` for all known K2 planets.
:param str model: The :py:obj:`everest` model name
:param str compare_to: The :py:obj:`everest` model name or \
other K2 pipeline name
'''
# Load all planet hosts
f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'planets.tsv')
epic, campaign, kp, _, _, _, _, _, _ = np.loadtxt(
f, unpack=True, skiprows=2)
epic = np.array(epic, dtype=int)
campaign = np.array(campaign, dtype=int)
cdpp = np.zeros(len(epic))
saturated = np.zeros(len(epic), dtype=int)
cdpp_1 = np.zeros(len(epic))
# Get the stats
for c in set(campaign):
# Everest model
f = os.path.join(EVEREST_SRC, 'missions', 'k2',
'tables', 'c%02d_%s.cdpp' % (int(c), model))
e0, _, _, c0, _, _, _, _, s0 = np.loadtxt(f, unpack=True, skiprows=2)
for i, e in enumerate(epic):
if e in e0:
j = np.argmax(e0 == e)
cdpp[i] = c0[j]
saturated[i] = s0[j]
# Comparison model
f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
'c%02d_%s.cdpp' % (int(c), compare_to.lower()))
if not os.path.exists(f):
continue
if compare_to.lower() in ['everest1', 'k2sff', 'k2sc']:
e1, c1 = np.loadtxt(f, unpack=True, skiprows=2)
else:
e1, _, _, c1, _, _, _, _, _ = np.loadtxt(
f, unpack=True, skiprows=2)
for i, e in enumerate(epic):
if e in e1:
j = np.argmax(e1 == e)
cdpp_1[i] = c1[j]
sat = np.where(saturated == 1)
unsat = np.where(saturated == 0)
# Plot the equivalent of the Aigrain+16 figure
fig, ax = pl.subplots(1)
fig.canvas.set_window_title(
'K2 Planet Hosts: %s versus %s' % (model, compare_to))
x = kp
y = (cdpp - cdpp_1) / cdpp_1
ax.scatter(x[unsat], y[unsat], color='b', marker='.',
alpha=0.5, zorder=-1, picker=True)
ax.scatter(x[sat], y[sat], color='r', marker='.',
alpha=0.5, zorder=-1, picker=True)
ax.set_ylim(-1, 1)
ax.set_xlim(8, 18)
ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
ax.set_title(r'K2 Planet Hosts', fontsize=18)
ax.set_ylabel(r'Relative CDPP', fontsize=18)
ax.set_xlabel('Kepler Magnitude', fontsize=18)
# Pickable points
Picker = StatsPicker([ax], [kp], [y], epic,
model=model, compare_to=compare_to)
fig.canvas.mpl_connect('pick_event', Picker)
# Show
pl.show() | def function[PlanetStatistics, parameter[model, compare_to]]:
constant[
Computes and plots the CDPP statistics comparison between `model` and
`compare_to` for all known K2 planets.
:param str model: The :py:obj:`everest` model name
:param str compare_to: The :py:obj:`everest` model name or other K2 pipeline name
]
variable[f] assign[=] call[name[os].path.join, parameter[name[EVEREST_SRC], constant[missions], constant[k2], constant[tables], constant[planets.tsv]]]
<ast.Tuple object at 0x7da1b0ffa6b0> assign[=] call[name[np].loadtxt, parameter[name[f]]]
variable[epic] assign[=] call[name[np].array, parameter[name[epic]]]
variable[campaign] assign[=] call[name[np].array, parameter[name[campaign]]]
variable[cdpp] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[epic]]]]]
variable[saturated] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[epic]]]]]
variable[cdpp_1] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[epic]]]]]
for taget[name[c]] in starred[call[name[set], parameter[name[campaign]]]] begin[:]
variable[f] assign[=] call[name[os].path.join, parameter[name[EVEREST_SRC], constant[missions], constant[k2], constant[tables], binary_operation[constant[c%02d_%s.cdpp] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0e7d900>, <ast.Name object at 0x7da1b0e7d240>]]]]]
<ast.Tuple object at 0x7da1b0e7d120> assign[=] call[name[np].loadtxt, parameter[name[f]]]
for taget[tuple[[<ast.Name object at 0x7da1b0e7d870>, <ast.Name object at 0x7da1b0e7d990>]]] in starred[call[name[enumerate], parameter[name[epic]]]] begin[:]
if compare[name[e] in name[e0]] begin[:]
variable[j] assign[=] call[name[np].argmax, parameter[compare[name[e0] equal[==] name[e]]]]
call[name[cdpp]][name[i]] assign[=] call[name[c0]][name[j]]
call[name[saturated]][name[i]] assign[=] call[name[s0]][name[j]]
variable[f] assign[=] call[name[os].path.join, parameter[name[EVEREST_SRC], constant[missions], constant[k2], constant[tables], binary_operation[constant[c%02d_%s.cdpp] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0e7c9a0>, <ast.Call object at 0x7da1b0e7c850>]]]]]
if <ast.UnaryOp object at 0x7da1b0f687f0> begin[:]
continue
if compare[call[name[compare_to].lower, parameter[]] in list[[<ast.Constant object at 0x7da1b0f69840>, <ast.Constant object at 0x7da1b0f19270>, <ast.Constant object at 0x7da1b0f19ff0>]]] begin[:]
<ast.Tuple object at 0x7da1b0f19240> assign[=] call[name[np].loadtxt, parameter[name[f]]]
for taget[tuple[[<ast.Name object at 0x7da1b0f19ba0>, <ast.Name object at 0x7da1b0f1a860>]]] in starred[call[name[enumerate], parameter[name[epic]]]] begin[:]
if compare[name[e] in name[e1]] begin[:]
variable[j] assign[=] call[name[np].argmax, parameter[compare[name[e1] equal[==] name[e]]]]
call[name[cdpp_1]][name[i]] assign[=] call[name[c1]][name[j]]
variable[sat] assign[=] call[name[np].where, parameter[compare[name[saturated] equal[==] constant[1]]]]
variable[unsat] assign[=] call[name[np].where, parameter[compare[name[saturated] equal[==] constant[0]]]]
<ast.Tuple object at 0x7da1b0f19f00> assign[=] call[name[pl].subplots, parameter[constant[1]]]
call[name[fig].canvas.set_window_title, parameter[binary_operation[constant[K2 Planet Hosts: %s versus %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f1a410>, <ast.Name object at 0x7da1b0f1baf0>]]]]]
variable[x] assign[=] name[kp]
variable[y] assign[=] binary_operation[binary_operation[name[cdpp] - name[cdpp_1]] / name[cdpp_1]]
call[name[ax].scatter, parameter[call[name[x]][name[unsat]], call[name[y]][name[unsat]]]]
call[name[ax].scatter, parameter[call[name[x]][name[sat]], call[name[y]][name[sat]]]]
call[name[ax].set_ylim, parameter[<ast.UnaryOp object at 0x7da1b0e645b0>, constant[1]]]
call[name[ax].set_xlim, parameter[constant[8], constant[18]]]
call[name[ax].axhline, parameter[constant[0]]]
call[name[ax].axhline, parameter[constant[0.5]]]
call[name[ax].axhline, parameter[<ast.UnaryOp object at 0x7da1b0e640a0>]]
call[name[ax].set_title, parameter[constant[K2 Planet Hosts]]]
call[name[ax].set_ylabel, parameter[constant[Relative CDPP]]]
call[name[ax].set_xlabel, parameter[constant[Kepler Magnitude]]]
variable[Picker] assign[=] call[name[StatsPicker], parameter[list[[<ast.Name object at 0x7da1b0e64730>]], list[[<ast.Name object at 0x7da1b0e65f60>]], list[[<ast.Name object at 0x7da1b0e65ff0>]], name[epic]]]
call[name[fig].canvas.mpl_connect, parameter[constant[pick_event], name[Picker]]]
call[name[pl].show, parameter[]] | keyword[def] identifier[PlanetStatistics] ( identifier[model] = literal[string] , identifier[compare_to] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[f] = identifier[os] . identifier[path] . identifier[join] ( identifier[EVEREST_SRC] , literal[string] , literal[string] , literal[string] , literal[string] )
identifier[epic] , identifier[campaign] , identifier[kp] , identifier[_] , identifier[_] , identifier[_] , identifier[_] , identifier[_] , identifier[_] = identifier[np] . identifier[loadtxt] (
identifier[f] , identifier[unpack] = keyword[True] , identifier[skiprows] = literal[int] )
identifier[epic] = identifier[np] . identifier[array] ( identifier[epic] , identifier[dtype] = identifier[int] )
identifier[campaign] = identifier[np] . identifier[array] ( identifier[campaign] , identifier[dtype] = identifier[int] )
identifier[cdpp] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[epic] ))
identifier[saturated] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[epic] ), identifier[dtype] = identifier[int] )
identifier[cdpp_1] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[epic] ))
keyword[for] identifier[c] keyword[in] identifier[set] ( identifier[campaign] ):
identifier[f] = identifier[os] . identifier[path] . identifier[join] ( identifier[EVEREST_SRC] , literal[string] , literal[string] ,
literal[string] , literal[string] %( identifier[int] ( identifier[c] ), identifier[model] ))
identifier[e0] , identifier[_] , identifier[_] , identifier[c0] , identifier[_] , identifier[_] , identifier[_] , identifier[_] , identifier[s0] = identifier[np] . identifier[loadtxt] ( identifier[f] , identifier[unpack] = keyword[True] , identifier[skiprows] = literal[int] )
keyword[for] identifier[i] , identifier[e] keyword[in] identifier[enumerate] ( identifier[epic] ):
keyword[if] identifier[e] keyword[in] identifier[e0] :
identifier[j] = identifier[np] . identifier[argmax] ( identifier[e0] == identifier[e] )
identifier[cdpp] [ identifier[i] ]= identifier[c0] [ identifier[j] ]
identifier[saturated] [ identifier[i] ]= identifier[s0] [ identifier[j] ]
identifier[f] = identifier[os] . identifier[path] . identifier[join] ( identifier[EVEREST_SRC] , literal[string] , literal[string] , literal[string] ,
literal[string] %( identifier[int] ( identifier[c] ), identifier[compare_to] . identifier[lower] ()))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[f] ):
keyword[continue]
keyword[if] identifier[compare_to] . identifier[lower] () keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[e1] , identifier[c1] = identifier[np] . identifier[loadtxt] ( identifier[f] , identifier[unpack] = keyword[True] , identifier[skiprows] = literal[int] )
keyword[else] :
identifier[e1] , identifier[_] , identifier[_] , identifier[c1] , identifier[_] , identifier[_] , identifier[_] , identifier[_] , identifier[_] = identifier[np] . identifier[loadtxt] (
identifier[f] , identifier[unpack] = keyword[True] , identifier[skiprows] = literal[int] )
keyword[for] identifier[i] , identifier[e] keyword[in] identifier[enumerate] ( identifier[epic] ):
keyword[if] identifier[e] keyword[in] identifier[e1] :
identifier[j] = identifier[np] . identifier[argmax] ( identifier[e1] == identifier[e] )
identifier[cdpp_1] [ identifier[i] ]= identifier[c1] [ identifier[j] ]
identifier[sat] = identifier[np] . identifier[where] ( identifier[saturated] == literal[int] )
identifier[unsat] = identifier[np] . identifier[where] ( identifier[saturated] == literal[int] )
identifier[fig] , identifier[ax] = identifier[pl] . identifier[subplots] ( literal[int] )
identifier[fig] . identifier[canvas] . identifier[set_window_title] (
literal[string] %( identifier[model] , identifier[compare_to] ))
identifier[x] = identifier[kp]
identifier[y] =( identifier[cdpp] - identifier[cdpp_1] )/ identifier[cdpp_1]
identifier[ax] . identifier[scatter] ( identifier[x] [ identifier[unsat] ], identifier[y] [ identifier[unsat] ], identifier[color] = literal[string] , identifier[marker] = literal[string] ,
identifier[alpha] = literal[int] , identifier[zorder] =- literal[int] , identifier[picker] = keyword[True] )
identifier[ax] . identifier[scatter] ( identifier[x] [ identifier[sat] ], identifier[y] [ identifier[sat] ], identifier[color] = literal[string] , identifier[marker] = literal[string] ,
identifier[alpha] = literal[int] , identifier[zorder] =- literal[int] , identifier[picker] = keyword[True] )
identifier[ax] . identifier[set_ylim] (- literal[int] , literal[int] )
identifier[ax] . identifier[set_xlim] ( literal[int] , literal[int] )
identifier[ax] . identifier[axhline] ( literal[int] , identifier[color] = literal[string] , identifier[lw] = literal[int] , identifier[zorder] =- literal[int] , identifier[alpha] = literal[int] )
identifier[ax] . identifier[axhline] ( literal[int] , identifier[color] = literal[string] , identifier[ls] = literal[string] , identifier[lw] = literal[int] , identifier[zorder] =- literal[int] , identifier[alpha] = literal[int] )
identifier[ax] . identifier[axhline] (- literal[int] , identifier[color] = literal[string] , identifier[ls] = literal[string] , identifier[lw] = literal[int] , identifier[zorder] =- literal[int] , identifier[alpha] = literal[int] )
identifier[ax] . identifier[set_title] ( literal[string] , identifier[fontsize] = literal[int] )
identifier[ax] . identifier[set_ylabel] ( literal[string] , identifier[fontsize] = literal[int] )
identifier[ax] . identifier[set_xlabel] ( literal[string] , identifier[fontsize] = literal[int] )
identifier[Picker] = identifier[StatsPicker] ([ identifier[ax] ],[ identifier[kp] ],[ identifier[y] ], identifier[epic] ,
identifier[model] = identifier[model] , identifier[compare_to] = identifier[compare_to] )
identifier[fig] . identifier[canvas] . identifier[mpl_connect] ( literal[string] , identifier[Picker] )
identifier[pl] . identifier[show] () | def PlanetStatistics(model='nPLD', compare_to='k2sff', **kwargs):
"""
Computes and plots the CDPP statistics comparison between `model` and
`compare_to` for all known K2 planets.
:param str model: The :py:obj:`everest` model name
:param str compare_to: The :py:obj:`everest` model name or other K2 pipeline name
"""
# Load all planet hosts
f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'planets.tsv')
(epic, campaign, kp, _, _, _, _, _, _) = np.loadtxt(f, unpack=True, skiprows=2)
epic = np.array(epic, dtype=int)
campaign = np.array(campaign, dtype=int)
cdpp = np.zeros(len(epic))
saturated = np.zeros(len(epic), dtype=int)
cdpp_1 = np.zeros(len(epic))
# Get the stats
for c in set(campaign):
# Everest model
f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(c), model))
(e0, _, _, c0, _, _, _, _, s0) = np.loadtxt(f, unpack=True, skiprows=2)
for (i, e) in enumerate(epic):
if e in e0:
j = np.argmax(e0 == e)
cdpp[i] = c0[j]
saturated[i] = s0[j] # depends on [control=['if'], data=['e', 'e0']] # depends on [control=['for'], data=[]]
# Comparison model
f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(c), compare_to.lower()))
if not os.path.exists(f):
continue # depends on [control=['if'], data=[]]
if compare_to.lower() in ['everest1', 'k2sff', 'k2sc']:
(e1, c1) = np.loadtxt(f, unpack=True, skiprows=2) # depends on [control=['if'], data=[]]
else:
(e1, _, _, c1, _, _, _, _, _) = np.loadtxt(f, unpack=True, skiprows=2)
for (i, e) in enumerate(epic):
if e in e1:
j = np.argmax(e1 == e)
cdpp_1[i] = c1[j] # depends on [control=['if'], data=['e', 'e1']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['c']]
sat = np.where(saturated == 1)
unsat = np.where(saturated == 0)
# Plot the equivalent of the Aigrain+16 figure
(fig, ax) = pl.subplots(1)
fig.canvas.set_window_title('K2 Planet Hosts: %s versus %s' % (model, compare_to))
x = kp
y = (cdpp - cdpp_1) / cdpp_1
ax.scatter(x[unsat], y[unsat], color='b', marker='.', alpha=0.5, zorder=-1, picker=True)
ax.scatter(x[sat], y[sat], color='r', marker='.', alpha=0.5, zorder=-1, picker=True)
ax.set_ylim(-1, 1)
ax.set_xlim(8, 18)
ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
ax.set_title('K2 Planet Hosts', fontsize=18)
ax.set_ylabel('Relative CDPP', fontsize=18)
ax.set_xlabel('Kepler Magnitude', fontsize=18)
# Pickable points
Picker = StatsPicker([ax], [kp], [y], epic, model=model, compare_to=compare_to)
fig.canvas.mpl_connect('pick_event', Picker)
# Show
pl.show() |
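A minimal numeric sketch of the relative-CDPP comparison plotted above, using made-up toy values rather than real K2 tables (the array contents are purely illustrative):

import numpy as np

# Toy CDPP values (ppm) for five hypothetical targets; not real K2 data.
cdpp = np.array([25.0, 40.0, 31.0, 18.0, 55.0])    # this model
cdpp_1 = np.array([30.0, 38.0, 31.0, 20.0, 70.0])  # comparison pipeline

# Relative CDPP, as on the y-axis above; negative means less noise.
y = (cdpp - cdpp_1) / cdpp_1
print(np.round(y, 3))  # [-0.167  0.053  0.    -0.1   -0.214]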
def _next_rId(self):
"""
Next available rId in collection, starting from 'rId1' and making use
of any gaps in numbering, e.g. 'rId2' for rIds ['rId1', 'rId3'].
"""
for n in range(1, len(self)+2):
rId_candidate = 'rId%d' % n # like 'rId19'
if rId_candidate not in self:
return rId_candidate | def function[_next_rId, parameter[self]]:
constant[
Next available rId in collection, starting from 'rId1' and making use
of any gaps in numbering, e.g. 'rId2' for rIds ['rId1', 'rId3'].
]
for taget[name[n]] in starred[call[name[range], parameter[constant[1], binary_operation[call[name[len], parameter[name[self]]] + constant[2]]]]] begin[:]
variable[rId_candidate] assign[=] binary_operation[constant[rId%d] <ast.Mod object at 0x7da2590d6920> name[n]]
if compare[name[rId_candidate] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:]
return[name[rId_candidate]] | keyword[def] identifier[_next_rId] ( identifier[self] ):
literal[string]
keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[self] )+ literal[int] ):
identifier[rId_candidate] = literal[string] % identifier[n]
keyword[if] identifier[rId_candidate] keyword[not] keyword[in] identifier[self] :
keyword[return] identifier[rId_candidate] | def _next_rId(self):
"""
Next available rId in collection, starting from 'rId1' and making use
of any gaps in numbering, e.g. 'rId2' for rIds ['rId1', 'rId3'].
"""
for n in range(1, len(self) + 2):
rId_candidate = 'rId%d' % n # like 'rId19'
if rId_candidate not in self:
return rId_candidate # depends on [control=['if'], data=['rId_candidate']] # depends on [control=['for'], data=['n']] |
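A hedged sketch of the gap-filling behaviour, wrapping the method in a hypothetical list-backed collection (`RelList` is invented for illustration and is not part of the original source):

class RelList(list):
    # Minimal stand-in: only needs len() and membership over rId strings.
    def _next_rId(self):
        for n in range(1, len(self) + 2):
            rId_candidate = 'rId%d' % n
            if rId_candidate not in self:
                return rId_candidate

rels = RelList(['rId1', 'rId3'])
print(rels._next_rId())  # 'rId2' -- the gap is reused before 'rId4'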
def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
"""Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
"""
LOG.debug('ELB settings:\n%s', elb_settings)
credential = get_env_credential(env=env)
account = credential['accountId']
listeners = []
if 'ports' in elb_settings:
for listener in elb_settings['ports']:
cert_name = format_cert_name(
env=env, region=region, account=account, certificate=listener.get('certificate', None))
lb_proto, lb_port = listener['loadbalancer'].split(':')
i_proto, i_port = listener['instance'].split(':')
listener_policies = listener.get('policies', [])
listener_policies += listener.get('listener_policies', [])
backend_policies = listener.get('backend_policies', [])
elb_data = {
'externalPort': int(lb_port),
'externalProtocol': lb_proto.upper(),
'internalPort': int(i_port),
'internalProtocol': i_proto.upper(),
'sslCertificateId': cert_name,
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}
listeners.append(elb_data)
else:
listener_policies = elb_settings.get('policies', [])
listener_policies += elb_settings.get('listener_policies', [])
backend_policies = elb_settings.get('backend_policies', [])
listeners = [{
'externalPort': int(elb_settings['lb_port']),
'externalProtocol': elb_settings['lb_proto'],
'internalPort': int(elb_settings['i_port']),
'internalProtocol': elb_settings['i_proto'],
'sslCertificateId': elb_settings['certificate'],
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}]
for listener in listeners:
LOG.info('ELB Listener:\n'
'loadbalancer %(externalProtocol)s:%(externalPort)d\n'
'instance %(internalProtocol)s:%(internalPort)d\n'
'certificate: %(sslCertificateId)s\n'
'listener_policies: %(listenerPolicies)s\n'
'backend_policies: %(backendPolicies)s', listener)
return listeners | def function[format_listeners, parameter[elb_settings, env, region]]:
constant[Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
]
call[name[LOG].debug, parameter[constant[ELB settings:
%s], name[elb_settings]]]
variable[credential] assign[=] call[name[get_env_credential], parameter[]]
variable[account] assign[=] call[name[credential]][constant[accountId]]
variable[listeners] assign[=] list[[]]
if compare[constant[ports] in name[elb_settings]] begin[:]
for taget[name[listener]] in starred[call[name[elb_settings]][constant[ports]]] begin[:]
variable[cert_name] assign[=] call[name[format_cert_name], parameter[]]
<ast.Tuple object at 0x7da207f03910> assign[=] call[call[name[listener]][constant[loadbalancer]].split, parameter[constant[:]]]
<ast.Tuple object at 0x7da207f02410> assign[=] call[call[name[listener]][constant[instance]].split, parameter[constant[:]]]
variable[listener_policies] assign[=] call[name[listener].get, parameter[constant[policies], list[[]]]]
<ast.AugAssign object at 0x7da207f03640>
variable[backend_policies] assign[=] call[name[listener].get, parameter[constant[backend_policies], list[[]]]]
variable[elb_data] assign[=] dictionary[[<ast.Constant object at 0x7da207f00280>, <ast.Constant object at 0x7da207f02590>, <ast.Constant object at 0x7da207f01f90>, <ast.Constant object at 0x7da207f03220>, <ast.Constant object at 0x7da207f03010>, <ast.Constant object at 0x7da207f020e0>, <ast.Constant object at 0x7da207f03970>], [<ast.Call object at 0x7da207f03430>, <ast.Call object at 0x7da207f00bb0>, <ast.Call object at 0x7da207f00eb0>, <ast.Call object at 0x7da207f02ef0>, <ast.Name object at 0x7da207f01d20>, <ast.Name object at 0x7da207f01e10>, <ast.Name object at 0x7da207f00df0>]]
call[name[listeners].append, parameter[name[elb_data]]]
for taget[name[listener]] in starred[name[listeners]] begin[:]
call[name[LOG].info, parameter[constant[ELB Listener:
loadbalancer %(externalProtocol)s:%(externalPort)d
instance %(internalProtocol)s:%(internalPort)d
certificate: %(sslCertificateId)s
listener_policies: %(listenerPolicies)s
backend_policies: %(backendPolicies)s], name[listener]]]
return[name[listeners]] | keyword[def] identifier[format_listeners] ( identifier[elb_settings] = keyword[None] , identifier[env] = literal[string] , identifier[region] = literal[string] ):
literal[string]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[elb_settings] )
identifier[credential] = identifier[get_env_credential] ( identifier[env] = identifier[env] )
identifier[account] = identifier[credential] [ literal[string] ]
identifier[listeners] =[]
keyword[if] literal[string] keyword[in] identifier[elb_settings] :
keyword[for] identifier[listener] keyword[in] identifier[elb_settings] [ literal[string] ]:
identifier[cert_name] = identifier[format_cert_name] (
identifier[env] = identifier[env] , identifier[region] = identifier[region] , identifier[account] = identifier[account] , identifier[certificate] = identifier[listener] . identifier[get] ( literal[string] , keyword[None] ))
identifier[lb_proto] , identifier[lb_port] = identifier[listener] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[i_proto] , identifier[i_port] = identifier[listener] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[listener_policies] = identifier[listener] . identifier[get] ( literal[string] ,[])
identifier[listener_policies] += identifier[listener] . identifier[get] ( literal[string] ,[])
identifier[backend_policies] = identifier[listener] . identifier[get] ( literal[string] ,[])
identifier[elb_data] ={
literal[string] : identifier[int] ( identifier[lb_port] ),
literal[string] : identifier[lb_proto] . identifier[upper] (),
literal[string] : identifier[int] ( identifier[i_port] ),
literal[string] : identifier[i_proto] . identifier[upper] (),
literal[string] : identifier[cert_name] ,
literal[string] : identifier[listener_policies] ,
literal[string] : identifier[backend_policies] ,
}
identifier[listeners] . identifier[append] ( identifier[elb_data] )
keyword[else] :
identifier[listener_policies] = identifier[elb_settings] . identifier[get] ( literal[string] ,[])
identifier[listener_policies] += identifier[elb_settings] . identifier[get] ( literal[string] ,[])
identifier[backend_policies] = identifier[elb_settings] . identifier[get] ( literal[string] ,[])
identifier[listeners] =[{
literal[string] : identifier[int] ( identifier[elb_settings] [ literal[string] ]),
literal[string] : identifier[elb_settings] [ literal[string] ],
literal[string] : identifier[int] ( identifier[elb_settings] [ literal[string] ]),
literal[string] : identifier[elb_settings] [ literal[string] ],
literal[string] : identifier[elb_settings] [ literal[string] ],
literal[string] : identifier[listener_policies] ,
literal[string] : identifier[backend_policies] ,
}]
keyword[for] identifier[listener] keyword[in] identifier[listeners] :
identifier[LOG] . identifier[info] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] , identifier[listener] )
keyword[return] identifier[listeners] | def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
"""Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
"""
LOG.debug('ELB settings:\n%s', elb_settings)
credential = get_env_credential(env=env)
account = credential['accountId']
listeners = []
if 'ports' in elb_settings:
for listener in elb_settings['ports']:
cert_name = format_cert_name(env=env, region=region, account=account, certificate=listener.get('certificate', None))
(lb_proto, lb_port) = listener['loadbalancer'].split(':')
(i_proto, i_port) = listener['instance'].split(':')
listener_policies = listener.get('policies', [])
listener_policies += listener.get('listener_policies', [])
backend_policies = listener.get('backend_policies', [])
elb_data = {'externalPort': int(lb_port), 'externalProtocol': lb_proto.upper(), 'internalPort': int(i_port), 'internalProtocol': i_proto.upper(), 'sslCertificateId': cert_name, 'listenerPolicies': listener_policies, 'backendPolicies': backend_policies}
listeners.append(elb_data) # depends on [control=['for'], data=['listener']] # depends on [control=['if'], data=['elb_settings']]
else:
listener_policies = elb_settings.get('policies', [])
listener_policies += elb_settings.get('listener_policies', [])
backend_policies = elb_settings.get('backend_policies', [])
listeners = [{'externalPort': int(elb_settings['lb_port']), 'externalProtocol': elb_settings['lb_proto'], 'internalPort': int(elb_settings['i_port']), 'internalProtocol': elb_settings['i_proto'], 'sslCertificateId': elb_settings['certificate'], 'listenerPolicies': listener_policies, 'backendPolicies': backend_policies}]
for listener in listeners:
LOG.info('ELB Listener:\nloadbalancer %(externalProtocol)s:%(externalPort)d\ninstance %(internalProtocol)s:%(internalPort)d\ncertificate: %(sslCertificateId)s\nlistener_policies: %(listenerPolicies)s\nbackend_policies: %(backendPolicies)s', listener) # depends on [control=['for'], data=['listener']]
return listeners |
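An illustrative input/output pairing for the new-style settings; the credential lookup and `format_cert_name` sit outside this sketch, and the `None` certificate pass-through is an assumption:

elb_settings = {
    'ports': [{'instance': 'HTTP:8080', 'loadbalancer': 'HTTP:80'}],
    'subnet_purpose': 'internal',
    'target': 'HTTP:8080/health',
}

# For that input, format_listeners should yield one entry shaped like:
expected = [{
    'externalPort': 80,
    'externalProtocol': 'HTTP',
    'internalPort': 8080,
    'internalProtocol': 'HTTP',
    'sslCertificateId': None,  # assuming format_cert_name(None) -> None
    'listenerPolicies': [],    # no 'policies' keys were given
    'backendPolicies': [],
}]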
def forward(self, speed=1, **kwargs):
"""
Drive the robot forward by running both motors forward.
:param float speed:
Speed at which to drive the motors, as a value between 0 (stopped)
and 1 (full speed). The default is 1.
:param float curve_left:
The amount to curve left while moving forwards, by driving the
left motor at a slower speed. Maximum *curve_left* is 1, the
default is 0 (no curve). This parameter can only be specified as a
keyword parameter, and is mutually exclusive with *curve_right*.
:param float curve_right:
The amount to curve right while moving forwards, by driving the
right motor at a slower speed. Maximum *curve_right* is 1, the
default is 0 (no curve). This parameter can only be specified as a
keyword parameter, and is mutually exclusive with *curve_left*.
"""
curve_left = kwargs.pop('curve_left', 0)
curve_right = kwargs.pop('curve_right', 0)
if kwargs:
raise TypeError('unexpected argument %s' % kwargs.popitem()[0])
if not 0 <= curve_left <= 1:
raise ValueError('curve_left must be between 0 and 1')
if not 0 <= curve_right <= 1:
raise ValueError('curve_right must be between 0 and 1')
if curve_left != 0 and curve_right != 0:
raise ValueError("curve_left and curve_right can't be used at "
"the same time")
self.left_motor.forward(speed * (1 - curve_left))
self.right_motor.forward(speed * (1 - curve_right)) | def function[forward, parameter[self, speed]]:
constant[
Drive the robot forward by running both motors forward.
:param float speed:
Speed at which to drive the motors, as a value between 0 (stopped)
and 1 (full speed). The default is 1.
:param float curve_left:
The amount to curve left while moving forwards, by driving the
left motor at a slower speed. Maximum *curve_left* is 1, the
default is 0 (no curve). This parameter can only be specified as a
keyword parameter, and is mutually exclusive with *curve_right*.
:param float curve_right:
The amount to curve right while moving forwards, by driving the
right motor at a slower speed. Maximum *curve_right* is 1, the
default is 0 (no curve). This parameter can only be specified as a
keyword parameter, and is mutually exclusive with *curve_left*.
]
variable[curve_left] assign[=] call[name[kwargs].pop, parameter[constant[curve_left], constant[0]]]
variable[curve_right] assign[=] call[name[kwargs].pop, parameter[constant[curve_right], constant[0]]]
if name[kwargs] begin[:]
<ast.Raise object at 0x7da207f9ab90>
if <ast.UnaryOp object at 0x7da207f98f70> begin[:]
<ast.Raise object at 0x7da207f9b760>
if <ast.UnaryOp object at 0x7da1b26afcd0> begin[:]
<ast.Raise object at 0x7da1b26ae3b0>
if <ast.BoolOp object at 0x7da1b26ae170> begin[:]
<ast.Raise object at 0x7da1b26ac850>
call[name[self].left_motor.forward, parameter[binary_operation[name[speed] * binary_operation[constant[1] - name[curve_left]]]]]
call[name[self].right_motor.forward, parameter[binary_operation[name[speed] * binary_operation[constant[1] - name[curve_right]]]]] | keyword[def] identifier[forward] ( identifier[self] , identifier[speed] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[curve_left] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] )
identifier[curve_right] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] )
keyword[if] identifier[kwargs] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[kwargs] . identifier[popitem] ()[ literal[int] ])
keyword[if] keyword[not] literal[int] <= identifier[curve_left] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] literal[int] <= identifier[curve_right] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[curve_left] != literal[int] keyword[and] identifier[curve_right] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[self] . identifier[left_motor] . identifier[forward] ( identifier[speed] *( literal[int] - identifier[curve_left] ))
identifier[self] . identifier[right_motor] . identifier[forward] ( identifier[speed] *( literal[int] - identifier[curve_right] )) | def forward(self, speed=1, **kwargs):
"""
Drive the robot forward by running both motors forward.
:param float speed:
Speed at which to drive the motors, as a value between 0 (stopped)
and 1 (full speed). The default is 1.
:param float curve_left:
The amount to curve left while moving forwards, by driving the
left motor at a slower speed. Maximum *curve_left* is 1, the
default is 0 (no curve). This parameter can only be specified as a
keyword parameter, and is mutually exclusive with *curve_right*.
:param float curve_right:
The amount to curve right while moving forwards, by driving the
right motor at a slower speed. Maximum *curve_right* is 1, the
default is 0 (no curve). This parameter can only be specified as a
keyword parameter, and is mutually exclusive with *curve_left*.
"""
curve_left = kwargs.pop('curve_left', 0)
curve_right = kwargs.pop('curve_right', 0)
if kwargs:
raise TypeError('unexpected argument %s' % kwargs.popitem()[0]) # depends on [control=['if'], data=[]]
if not 0 <= curve_left <= 1:
raise ValueError('curve_left must be between 0 and 1') # depends on [control=['if'], data=[]]
if not 0 <= curve_right <= 1:
raise ValueError('curve_right must be between 0 and 1') # depends on [control=['if'], data=[]]
if curve_left != 0 and curve_right != 0:
raise ValueError("curve_left and curve_right can't be used at the same time") # depends on [control=['if'], data=[]]
self.left_motor.forward(speed * (1 - curve_left))
self.right_motor.forward(speed * (1 - curve_right)) |
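A runnable sketch with stubbed motors; the `FakeMotor`/`Bot` harness is invented for illustration (real use would go through a robot class such as gpiozero's), and it treats the function above as module-level, as shown here:

class FakeMotor:
    def forward(self, speed):
        print('forward at %.3f' % speed)

class Bot:
    left_motor, right_motor = FakeMotor(), FakeMotor()
    forward = forward  # reuse the module-level method above

Bot().forward(0.5, curve_left=0.25)
# forward at 0.375   <- left motor: 0.5 * (1 - 0.25)
# forward at 0.500   <- right motor: full commanded speed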
def create_symbol(self, *args, **kwargs):
"""
Extensions that discover and create instances of `symbols.Symbol`
should do so through this method, as it keeps an index
of created symbols that can be used when generating a "naive index".
See `database.Database.create_symbol` for more
information.
Args:
args: see `database.Database.create_symbol`
kwargs: see `database.Database.create_symbol`
Returns:
symbols.Symbol: the created symbol, or `None`.
"""
if not kwargs.get('project_name'):
kwargs['project_name'] = self.project.project_name
sym = self.app.database.create_symbol(*args, **kwargs)
if sym:
# pylint: disable=unidiomatic-typecheck
if type(sym) != Symbol:
self._created_symbols[sym.filename].add(sym.unique_name)
return sym | def function[create_symbol, parameter[self]]:
constant[
Extensions that discover and create instances of `symbols.Symbol`
should do so through this method, as it keeps an index
of created symbols that can be used when generating a "naive index".
See `database.Database.create_symbol` for more
information.
Args:
args: see `database.Database.create_symbol`
kwargs: see `database.Database.create_symbol`
Returns:
symbols.Symbol: the created symbol, or `None`.
]
if <ast.UnaryOp object at 0x7da20c6a86a0> begin[:]
call[name[kwargs]][constant[project_name]] assign[=] name[self].project.project_name
variable[sym] assign[=] call[name[self].app.database.create_symbol, parameter[<ast.Starred object at 0x7da2047e9180>]]
if name[sym] begin[:]
if compare[call[name[type], parameter[name[sym]]] not_equal[!=] name[Symbol]] begin[:]
call[call[name[self]._created_symbols][name[sym].filename].add, parameter[name[sym].unique_name]]
return[name[sym]] | keyword[def] identifier[create_symbol] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[project] . identifier[project_name]
identifier[sym] = identifier[self] . identifier[app] . identifier[database] . identifier[create_symbol] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[sym] :
keyword[if] identifier[type] ( identifier[sym] )!= identifier[Symbol] :
identifier[self] . identifier[_created_symbols] [ identifier[sym] . identifier[filename] ]. identifier[add] ( identifier[sym] . identifier[unique_name] )
keyword[return] identifier[sym] | def create_symbol(self, *args, **kwargs):
"""
Extensions that discover and create instances of `symbols.Symbol`
should do so through this method, as it keeps an index
of created symbols that can be used when generating a "naive index".
See `database.Database.create_symbol` for more
information.
Args:
args: see `database.Database.create_symbol`
kwargs: see `database.Database.create_symbol`
Returns:
symbols.Symbol: the created symbol, or `None`.
"""
if not kwargs.get('project_name'):
kwargs['project_name'] = self.project.project_name # depends on [control=['if'], data=[]]
sym = self.app.database.create_symbol(*args, **kwargs)
if sym:
# pylint: disable=unidiomatic-typecheck
if type(sym) != Symbol:
self._created_symbols[sym.filename].add(sym.unique_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return sym |
def remove_listener(self, event_name, listener):
"""Removes a listener."""
self.listeners[event_name].remove(listener)
return self | def function[remove_listener, parameter[self, event_name, listener]]:
constant[Removes a listener.]
call[call[name[self].listeners][name[event_name]].remove, parameter[name[listener]]]
return[name[self]] | keyword[def] identifier[remove_listener] ( identifier[self] , identifier[event_name] , identifier[listener] ):
literal[string]
identifier[self] . identifier[listeners] [ identifier[event_name] ]. identifier[remove] ( identifier[listener] )
keyword[return] identifier[self] | def remove_listener(self, event_name, listener):
"""Removes a listener."""
self.listeners[event_name].remove(listener)
return self |
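`return self` makes the call chainable; a minimal runnable sketch, with a hypothetical `add_listener` counterpart following the same convention and the function above treated as module-level:

from collections import defaultdict

class Emitter:
    def __init__(self):
        self.listeners = defaultdict(list)
    def add_listener(self, event_name, listener):  # assumed counterpart
        self.listeners[event_name].append(listener)
        return self
    remove_listener = remove_listener  # the method shown above

def on_data(payload): pass

e = Emitter().add_listener('data', on_data).add_listener('end', on_data)
e.remove_listener('data', on_data).remove_listener('end', on_data)
print(dict(e.listeners))  # {'data': [], 'end': []}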
async def handle_agent_message(self, agent_addr, message):
"""Dispatch messages received from agents to the right handlers"""
message_handlers = {
AgentHello: self.handle_agent_hello,
AgentJobStarted: self.handle_agent_job_started,
AgentJobDone: self.handle_agent_job_done,
AgentJobSSHDebug: self.handle_agent_job_ssh_debug,
Pong: self._handle_pong
}
try:
func = message_handlers[message.__class__]
except:
raise TypeError("Unknown message type %s" % message.__class__)
self._create_safe_task(func(agent_addr, message)) | <ast.AsyncFunctionDef object at 0x7da20c7cbf10> | keyword[async] keyword[def] identifier[handle_agent_message] ( identifier[self] , identifier[agent_addr] , identifier[message] ):
literal[string]
identifier[message_handlers] ={
identifier[AgentHello] : identifier[self] . identifier[handle_agent_hello] ,
identifier[AgentJobStarted] : identifier[self] . identifier[handle_agent_job_started] ,
identifier[AgentJobDone] : identifier[self] . identifier[handle_agent_job_done] ,
identifier[AgentJobSSHDebug] : identifier[self] . identifier[handle_agent_job_ssh_debug] ,
identifier[Pong] : identifier[self] . identifier[_handle_pong]
}
keyword[try] :
identifier[func] = identifier[message_handlers] [ identifier[message] . identifier[__class__] ]
keyword[except] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[message] . identifier[__class__] )
identifier[self] . identifier[_create_safe_task] ( identifier[func] ( identifier[agent_addr] , identifier[message] )) | async def handle_agent_message(self, agent_addr, message):
"""Dispatch messages received from agents to the right handlers"""
message_handlers = {AgentHello: self.handle_agent_hello, AgentJobStarted: self.handle_agent_job_started, AgentJobDone: self.handle_agent_job_done, AgentJobSSHDebug: self.handle_agent_job_ssh_debug, Pong: self._handle_pong}
try:
func = message_handlers[message.__class__] # depends on [control=['try'], data=[]]
except:
raise TypeError('Unknown message type %s' % message.__class__) # depends on [control=['except'], data=[]]
self._create_safe_task(func(agent_addr, message)) |
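The dispatch-table pattern in miniature, with synchronous stand-ins (the message classes and handlers here are hypothetical):

class Ping: pass
class Quit: pass

def handle_ping(addr, msg): print('pong ->', addr)
def handle_quit(addr, msg): print('closing', addr)

handlers = {Ping: handle_ping, Quit: handle_quit}

def dispatch(addr, message):
    try:
        func = handlers[message.__class__]
    except KeyError:  # the original uses a bare except; tightened here
        raise TypeError('Unknown message type %s' % message.__class__)
    func(addr, message)

dispatch('tcp://agent-1:5001', Ping())  # pong -> tcp://agent-1:5001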
def _same_day_ids(self):
"""
:return: ids of occurrences that finish on the same day that they
start, or midnight the next day.
"""
# we can pre-filter to return only occurrences that are <=24h long,
# but until `__date` lookups can be used in F() expressions
# we'll have to refine manually
qs = self.filter(end__lte=F('start') + timedelta(days=1))
# filter occurrences to those sharing the same end date, or
# midnight the next day (unless it's an all-day occurrence)
ids = [o.id for o in qs if (
(o.local_start.date() == o.local_end.date()) or
(
o.local_end.time() == time(0,0) and
o.local_end.date() == o.local_start.date() + timedelta(days=1) and
o.is_all_day == False
)
)]
return ids | def function[_same_day_ids, parameter[self]]:
constant[
:return: ids of occurrences that finish on the same day that they
start, or midnight the next day.
]
variable[qs] assign[=] call[name[self].filter, parameter[]]
variable[ids] assign[=] <ast.ListComp object at 0x7da20c6a8670>
return[name[ids]] | keyword[def] identifier[_same_day_ids] ( identifier[self] ):
literal[string]
identifier[qs] = identifier[self] . identifier[filter] ( identifier[end__lte] = identifier[F] ( literal[string] )+ identifier[timedelta] ( identifier[days] = literal[int] ))
identifier[ids] =[ identifier[o] . identifier[id] keyword[for] identifier[o] keyword[in] identifier[qs] keyword[if] (
( identifier[o] . identifier[local_start] . identifier[date] ()== identifier[o] . identifier[local_end] . identifier[date] ()) keyword[or]
(
identifier[o] . identifier[local_end] . identifier[time] ()== identifier[time] ( literal[int] , literal[int] ) keyword[and]
identifier[o] . identifier[local_end] . identifier[date] ()== identifier[o] . identifier[local_start] . identifier[date] ()+ identifier[timedelta] ( identifier[days] = literal[int] ) keyword[and]
identifier[o] . identifier[is_all_day] == keyword[False]
)
)]
keyword[return] identifier[ids] | def _same_day_ids(self):
"""
:return: ids of occurrences that finish on the same day that they
start, or midnight the next day.
"""
# we can pre-filter to return only occurrences that are <=24h long,
# but until `__date` lookups can be used in F() expressions
# we'll have to refine manually
qs = self.filter(end__lte=F('start') + timedelta(days=1))
# filter occurrences to those sharing the same end date, or
# midnight the next day (unless it's an all-day occurrence)
ids = [o.id for o in qs if o.local_start.date() == o.local_end.date() or (o.local_end.time() == time(0, 0) and o.local_end.date() == o.local_start.date() + timedelta(days=1) and (o.is_all_day == False))]
return ids |
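The same-day predicate in isolation, with plain dates standing in for the occurrence's local times (a sketch of the condition, not the ORM code):

from datetime import date, time, timedelta

def same_day(start_d, end_d, end_t, is_all_day):
    return end_d == start_d or (
        end_t == time(0, 0)
        and end_d == start_d + timedelta(days=1)
        and not is_all_day)

d = date(2024, 5, 1)
print(same_day(d, d, time(17, 0), False))                     # True
print(same_day(d, d + timedelta(days=1), time(0, 0), False))  # True (ends at midnight)
print(same_day(d, d + timedelta(days=1), time(0, 0), True))   # False (all-day)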
def jensen_shannon(h1, h2): # 85 us @array, 110 us @list w/ 100 bins
r"""
Jensen-Shannon divergence.
A symmetric and numerically more stable empirical extension of the Kullback-Leibler
divergence.
The Jensen Shannon divergence between two histograms :math:`H` and :math:`H'` of size
:math:`m` is defined as:
.. math::
d_{JSD}(H, H') =
\frac{1}{2} d_{KL}(H, H^*) +
\frac{1}{2} d_{KL}(H', H^*)
with :math:`H^*=\frac{1}{2}(H + H')`.
*Attributes:*
- semimetric
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, 1]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram, same bins as ``h1``.
Returns
-------
jensen_shannon : float
Jensen-Shannon divergence.
"""
h1, h2 = __prepare_histogram(h1, h2)
s = (h1 + h2) / 2.
return __kullback_leibler(h1, s) / 2. + __kullback_leibler(h2, s) / 2. | def function[jensen_shannon, parameter[h1, h2]]:
constant[
Jensen-Shannon divergence.
A symmetric and numerically more stable empirical extension of the Kullback-Leibler
divergence.
The Jensen Shannon divergence between two histograms :math:`H` and :math:`H'` of size
:math:`m` is defined as:
.. math::
d_{JSD}(H, H') =
\frac{1}{2} d_{KL}(H, H^*) +
\frac{1}{2} d_{KL}(H', H^*)
with :math:`H^*=\frac{1}{2}(H + H')`.
*Attributes:*
- semimetric
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, 1]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram, same bins as ``h1``.
Returns
-------
jensen_shannon : float
Jensen-Shannon divergence.
]
<ast.Tuple object at 0x7da1b1516d70> assign[=] call[name[__prepare_histogram], parameter[name[h1], name[h2]]]
variable[s] assign[=] binary_operation[binary_operation[name[h1] + name[h2]] / constant[2.0]]
return[binary_operation[binary_operation[call[name[__kullback_leibler], parameter[name[h1], name[s]]] / constant[2.0]] + binary_operation[call[name[__kullback_leibler], parameter[name[h2], name[s]]] / constant[2.0]]]] | keyword[def] identifier[jensen_shannon] ( identifier[h1] , identifier[h2] ):
literal[string]
identifier[h1] , identifier[h2] = identifier[__prepare_histogram] ( identifier[h1] , identifier[h2] )
identifier[s] =( identifier[h1] + identifier[h2] )/ literal[int]
keyword[return] identifier[__kullback_leibler] ( identifier[h1] , identifier[s] )/ literal[int] + identifier[__kullback_leibler] ( identifier[h2] , identifier[s] )/ literal[int] | def jensen_shannon(h1, h2): # 85 us @array, 110 us @list w/ 100 bins
    r"""
    Jensen-Shannon divergence.
    A symmetric and numerically more stable empirical extension of the Kullback-Leibler
    divergence.
    The Jensen Shannon divergence between two histograms :math:`H` and :math:`H'` of size
    :math:`m` is defined as:
    .. math::
        d_{JSD}(H, H') =
            \frac{1}{2} d_{KL}(H, H^*) +
            \frac{1}{2} d_{KL}(H', H^*)
    with :math:`H^*=\frac{1}{2}(H + H')`.
    *Attributes:*
    - semimetric
    *Attributes for normalized histograms:*
    - :math:`d(H, H')\in[0, 1]`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    *Attributes for not-normalized histograms:*
    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`
    *Attributes for not-equal histograms:*
    - not applicable
    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram, same bins as ``h1``.
    Returns
    -------
    jensen_shannon : float
        Jensen-Shannon divergence.
    """
(h1, h2) = __prepare_histogram(h1, h2)
s = (h1 + h2) / 2.0
return __kullback_leibler(h1, s) / 2.0 + __kullback_leibler(h2, s) / 2.0 |
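A numeric sanity check against the definition above, with the private helpers (`__prepare_histogram`, `__kullback_leibler`) replaced by minimal stand-ins; normalized float histograms and natural-log KL are assumptions of this sketch:

import numpy as np

def kl(h, s):
    m = h > 0  # convention: 0 * log(0 / s) contributes nothing
    return float(np.sum(h[m] * np.log(h[m] / s[m])))

h1 = np.array([0.5, 0.5, 0.0])
h2 = np.array([0.0, 0.5, 0.5])
s = (h1 + h2) / 2.0
jsd = kl(h1, s) / 2.0 + kl(h2, s) / 2.0
print(round(jsd, 5))  # 0.34657 == log(2) / 2 for these histograms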
def output_format_lock(self, packages, **kwargs):
""" Text to lock file """
self._output_config['type'] = PLAIN
text = ''
tmp_packages = OrderedDict()
columns = self._config.get_columns()
widths = {}
for _pkg in packages.values():
_pkg_name = _pkg.package_name
_params = _pkg.get_params(columns, merged=True, raw=False)
if _pkg_name not in tmp_packages:
tmp_packages[_pkg_name] = _params
comment = 1
for _col in columns:
widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
comment = 0
comment = 1
for _col in columns:
text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
comment = 0
text = '#{}\n'.format(text.strip())
for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
_pkg = tmp_packages[_pkg_name]
line = ''
for _col in columns:
line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col]))))
text += '{}\n'.format(line.strip())
return text | def function[output_format_lock, parameter[self, packages]]:
constant[ Text to lock file ]
call[name[self]._output_config][constant[type]] assign[=] name[PLAIN]
variable[text] assign[=] constant[]
variable[tmp_packages] assign[=] call[name[OrderedDict], parameter[]]
variable[columns] assign[=] call[name[self]._config.get_columns, parameter[]]
variable[widths] assign[=] dictionary[[], []]
for taget[name[_pkg]] in starred[call[name[packages].values, parameter[]]] begin[:]
variable[_pkg_name] assign[=] name[_pkg].package_name
variable[_params] assign[=] call[name[_pkg].get_params, parameter[name[columns]]]
if compare[name[_pkg_name] <ast.NotIn object at 0x7da2590d7190> name[tmp_packages]] begin[:]
call[name[tmp_packages]][name[_pkg_name]] assign[=] name[_params]
variable[comment] assign[=] constant[1]
for taget[name[_col]] in starred[name[columns]] begin[:]
call[name[widths]][name[_col]] assign[=] binary_operation[call[name[max], parameter[call[name[widths].get, parameter[name[_col], call[name[len], parameter[name[_col]]]]], call[name[len], parameter[call[name[str], parameter[call[name[_params].get, parameter[name[_col], constant[]]]]]]]]] + name[comment]]
variable[comment] assign[=] constant[0]
variable[comment] assign[=] constant[1]
for taget[name[_col]] in starred[name[columns]] begin[:]
<ast.AugAssign object at 0x7da18dc9a110>
variable[comment] assign[=] constant[0]
variable[text] assign[=] call[constant[#{}
].format, parameter[call[name[text].strip, parameter[]]]]
for taget[name[_pkg_name]] in starred[call[name[sorted], parameter[name[tmp_packages]]]] begin[:]
variable[_pkg] assign[=] call[name[tmp_packages]][name[_pkg_name]]
variable[line] assign[=] constant[]
for taget[name[_col]] in starred[name[columns]] begin[:]
<ast.AugAssign object at 0x7da20c6c4100>
<ast.AugAssign object at 0x7da18fe90640>
return[name[text]] | keyword[def] identifier[output_format_lock] ( identifier[self] , identifier[packages] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_output_config] [ literal[string] ]= identifier[PLAIN]
identifier[text] = literal[string]
identifier[tmp_packages] = identifier[OrderedDict] ()
identifier[columns] = identifier[self] . identifier[_config] . identifier[get_columns] ()
identifier[widths] ={}
keyword[for] identifier[_pkg] keyword[in] identifier[packages] . identifier[values] ():
identifier[_pkg_name] = identifier[_pkg] . identifier[package_name]
identifier[_params] = identifier[_pkg] . identifier[get_params] ( identifier[columns] , identifier[merged] = keyword[True] , identifier[raw] = keyword[False] )
keyword[if] identifier[_pkg_name] keyword[not] keyword[in] identifier[tmp_packages] :
identifier[tmp_packages] [ identifier[_pkg_name] ]= identifier[_params]
identifier[comment] = literal[int]
keyword[for] identifier[_col] keyword[in] identifier[columns] :
identifier[widths] [ identifier[_col] ]= identifier[max] ( identifier[widths] . identifier[get] ( identifier[_col] , identifier[len] ( identifier[_col] )), identifier[len] ( identifier[str] ( identifier[_params] . identifier[get] ( identifier[_col] , literal[string] ))))+ identifier[comment]
identifier[comment] = literal[int]
identifier[comment] = literal[int]
keyword[for] identifier[_col] keyword[in] identifier[columns] :
identifier[text] += literal[string] . identifier[format] ( identifier[_col] , literal[string] *( identifier[widths] [ identifier[_col] ]- identifier[len] ( identifier[_col] )- identifier[comment] ))
identifier[comment] = literal[int]
identifier[text] = literal[string] . identifier[format] ( identifier[text] . identifier[strip] ())
keyword[for] identifier[_pkg_name] keyword[in] identifier[sorted] ( identifier[tmp_packages] , identifier[key] = keyword[lambda] identifier[x] : identifier[str] ( identifier[x] ). identifier[lower] ()):
identifier[_pkg] = identifier[tmp_packages] [ identifier[_pkg_name] ]
identifier[line] = literal[string]
keyword[for] identifier[_col] keyword[in] identifier[columns] :
identifier[line] += literal[string] . identifier[format] ( identifier[_pkg] [ identifier[_col] ], literal[string] *( identifier[widths] [ identifier[_col] ]- identifier[len] ( identifier[str] ( identifier[_pkg] [ identifier[_col] ]))))
identifier[text] += literal[string] . identifier[format] ( identifier[line] . identifier[strip] ())
keyword[return] identifier[text] | def output_format_lock(self, packages, **kwargs):
""" Text to lock file """
self._output_config['type'] = PLAIN
text = ''
tmp_packages = OrderedDict()
columns = self._config.get_columns()
widths = {}
for _pkg in packages.values():
_pkg_name = _pkg.package_name
_params = _pkg.get_params(columns, merged=True, raw=False)
if _pkg_name not in tmp_packages:
tmp_packages[_pkg_name] = _params
comment = 1
for _col in columns:
widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
comment = 0 # depends on [control=['for'], data=['_col']] # depends on [control=['if'], data=['_pkg_name', 'tmp_packages']] # depends on [control=['for'], data=['_pkg']]
comment = 1
for _col in columns:
text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
comment = 0 # depends on [control=['for'], data=['_col']]
text = '#{}\n'.format(text.strip())
for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
_pkg = tmp_packages[_pkg_name]
line = ''
for _col in columns:
line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col])))) # depends on [control=['for'], data=['_col']]
text += '{}\n'.format(line.strip()) # depends on [control=['for'], data=['_pkg_name']]
return text |
def parse_cwl_type(cwl_type_string):
"""
Parses cwl type information from a cwl type string.
Examples:
- "File[]" -> {'type': 'File', 'isArray': True, 'isOptional': False}
- "int?" -> {'type': 'int', 'isArray': False, 'isOptional': True}
:param cwl_type_string: The cwl type string to extract information from
:return: A dictionary containing information about the parsed cwl type string
"""
is_optional = cwl_type_string.endswith('?')
if is_optional:
cwl_type_string = cwl_type_string[:-1]
is_array = cwl_type_string.endswith('[]')
if is_array:
cwl_type_string = cwl_type_string[:-2]
return {'type': cwl_type_string, 'isArray': is_array, 'isOptional': is_optional} | def function[parse_cwl_type, parameter[cwl_type_string]]:
constant[
Parses cwl type information from a cwl type string.
Examples:
- "File[]" -> {'type': 'File', 'isArray': True, 'isOptional': False}
- "int?" -> {'type': 'int', 'isArray': False, 'isOptional': True}
:param cwl_type_string: The cwl type string to extract information from
:return: A dictionary containing information about the parsed cwl type string
]
variable[is_optional] assign[=] call[name[cwl_type_string].endswith, parameter[constant[?]]]
if name[is_optional] begin[:]
variable[cwl_type_string] assign[=] call[name[cwl_type_string]][<ast.Slice object at 0x7da1b107b2e0>]
variable[is_array] assign[=] call[name[cwl_type_string].endswith, parameter[constant[[]]]]
if name[is_array] begin[:]
variable[cwl_type_string] assign[=] call[name[cwl_type_string]][<ast.Slice object at 0x7da1b1036440>]
return[dictionary[[<ast.Constant object at 0x7da1b1037a00>, <ast.Constant object at 0x7da1b10362c0>, <ast.Constant object at 0x7da1b1034130>], [<ast.Name object at 0x7da1b10357b0>, <ast.Name object at 0x7da1b1037970>, <ast.Name object at 0x7da1b1037fd0>]]] | keyword[def] identifier[parse_cwl_type] ( identifier[cwl_type_string] ):
literal[string]
identifier[is_optional] = identifier[cwl_type_string] . identifier[endswith] ( literal[string] )
keyword[if] identifier[is_optional] :
identifier[cwl_type_string] = identifier[cwl_type_string] [:- literal[int] ]
identifier[is_array] = identifier[cwl_type_string] . identifier[endswith] ( literal[string] )
keyword[if] identifier[is_array] :
identifier[cwl_type_string] = identifier[cwl_type_string] [:- literal[int] ]
keyword[return] { literal[string] : identifier[cwl_type_string] , literal[string] : identifier[is_array] , literal[string] : identifier[is_optional] } | def parse_cwl_type(cwl_type_string):
"""
Parses cwl type information from a cwl type string.
Examples:
- "File[]" -> {'type': 'File', 'isArray': True, 'isOptional': False}
- "int?" -> {'type': 'int', 'isArray': False, 'isOptional': True}
:param cwl_type_string: The cwl type string to extract information from
:return: A dictionary containing information about the parsed cwl type string
"""
is_optional = cwl_type_string.endswith('?')
if is_optional:
cwl_type_string = cwl_type_string[:-1] # depends on [control=['if'], data=[]]
is_array = cwl_type_string.endswith('[]')
if is_array:
cwl_type_string = cwl_type_string[:-2] # depends on [control=['if'], data=[]]
return {'type': cwl_type_string, 'isArray': is_array, 'isOptional': is_optional} |
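The docstring examples run end to end (assuming the function above is in scope), plus the combined case, where the '?' is peeled off before the '[]':

print(parse_cwl_type('File[]'))
# {'type': 'File', 'isArray': True, 'isOptional': False}
print(parse_cwl_type('int?'))
# {'type': 'int', 'isArray': False, 'isOptional': True}
print(parse_cwl_type('string[]?'))
# {'type': 'string', 'isArray': True, 'isOptional': True}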
def get_date_less_query(days, date_field):
"""
Query for whether date_field falls within "days" days from now.
"""
query = None
days = get_integer(days)
if days:
future = get_days_from_now(days)
query = Q(**{"%s__lte" % date_field: future.isoformat()})
return query | def function[get_date_less_query, parameter[days, date_field]]:
constant[
Query for whether date_field falls within "days" days from now.
]
variable[query] assign[=] constant[None]
variable[days] assign[=] call[name[get_integer], parameter[name[days]]]
if name[days] begin[:]
variable[future] assign[=] call[name[get_days_from_now], parameter[name[days]]]
variable[query] assign[=] call[name[Q], parameter[]]
return[name[query]] | keyword[def] identifier[get_date_less_query] ( identifier[days] , identifier[date_field] ):
literal[string]
identifier[query] = keyword[None]
identifier[days] = identifier[get_integer] ( identifier[days] )
keyword[if] identifier[days] :
identifier[future] = identifier[get_days_from_now] ( identifier[days] )
identifier[query] = identifier[Q] (**{ literal[string] % identifier[date_field] : identifier[future] . identifier[isoformat] ()})
keyword[return] identifier[query] | def get_date_less_query(days, date_field):
"""
Query for whether date_field falls within "days" days from now.
"""
query = None
days = get_integer(days)
if days:
future = get_days_from_now(days)
query = Q(**{'%s__lte' % date_field: future.isoformat()}) # depends on [control=['if'], data=[]]
return query |
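A hedged usage sketch, assuming Django's `Q` objects and that `get_days_from_now(n)` returns a datetime `n` days ahead; the model and field names are placeholders:

# q = get_date_less_query('30', 'expires')
# -> Q(expires__lte='<isoformat of now + 30 days>')
#
# qs = MyModel.objects.filter(q) if q is not None else MyModel.objects.all()
# A falsy or unparseable `days` leaves q as None, so callers must handle that.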
def yesterday(self, date_from=None, date_format=None):
"""
    Returns yesterday's date, from now or from a supplied date
    :param: :date_from reference date
:return datetime
"""
# yesterday's date
return self.delta(date_from=date_from, date_format=date_format, days=-1) | def function[yesterday, parameter[self, date_from, date_format]]:
constant[
Returns yesterday's date, from now or from a supplied date
:param: :date_from reference date
:return datetime
]
return[call[name[self].delta, parameter[]]] | keyword[def] identifier[yesterday] ( identifier[self] , identifier[date_from] = keyword[None] , identifier[date_format] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[delta] ( identifier[date_from] = identifier[date_from] , identifier[date_format] = identifier[date_format] , identifier[days] =- literal[int] ) | def yesterday(self, date_from=None, date_format=None):
"""
    Returns yesterday's date, from now or from a supplied date
    :param: :date_from reference date
    :return datetime
    """ # yesterday's date
return self.delta(date_from=date_from, date_format=date_format, days=-1) |
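Hedged usage, assuming the host class exposes a `delta(date_from, date_format, days)` that shifts a date by `days`; the class name is hypothetical:

# dates = DateHelper()
# dates.yesterday()                          # now minus one day
# dates.yesterday(date_from='2020-03-01',
#                 date_format='%Y-%m-%d')    # -> 2020-02-29 (leap year)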
def instances(cls, parent=None):
"""
Returns all the instances that exist for a given parent. If
no parent exists, then an empty list will be returned.
:param parent | <QtGui.QWidget>
:return [<XView>, ..]
"""
if parent is None:
parent = projexui.topWindow()
return parent.findChildren(cls) | def function[instances, parameter[cls, parent]]:
constant[
Returns all the instances that exist for a given parent. If
no parent exists, then an empty list will be returned.
:param parent | <QtGui.QWidget>
:return [<XView>, ..]
]
if compare[name[parent] is constant[None]] begin[:]
variable[parent] assign[=] call[name[projexui].topWindow, parameter[]]
return[call[name[parent].findChildren, parameter[name[cls]]]] | keyword[def] identifier[instances] ( identifier[cls] , identifier[parent] = keyword[None] ):
literal[string]
keyword[if] identifier[parent] keyword[is] keyword[None] :
identifier[parent] = identifier[projexui] . identifier[topWindow] ()
keyword[return] identifier[parent] . identifier[findChildren] ( identifier[cls] ) | def instances(cls, parent=None):
"""
Returns all the instances that exist for a given parent. If
no parent exists, then an empty list will be returned.
:param parent | <QtGui.QWidget>
:return [<XView>, ..]
"""
if parent is None:
parent = projexui.topWindow() # depends on [control=['if'], data=['parent']]
return parent.findChildren(cls) |
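Hedged usage against a Qt widget tree (projexui assumed importable; `my_window` is a placeholder widget):

# views = XView.instances(parent=my_window)  # all XView children of my_window
# views = XView.instances()                  # falls back to the top window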
def get_args():
"""
Parse and return the command-line arguments.
"""
ap = argparse.ArgumentParser(description="Create frames for a movie that can be compiled using ffmpeg")
ap.add_argument("start", help="date string as start time")
ap.add_argument("end", help="date string as end time")
ap.add_argument("step", type=float, help="fraction of a day to step by")
ap.add_argument("--config", help="path to a config file", default="config.json")
return ap.parse_args() | def function[get_args, parameter[]]:
constant[
Parse and return the command-line arguments.
]
variable[ap] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[ap].add_argument, parameter[constant[start]]]
call[name[ap].add_argument, parameter[constant[end]]]
call[name[ap].add_argument, parameter[constant[step]]]
call[name[ap].add_argument, parameter[constant[--config]]]
return[call[name[ap].parse_args, parameter[]]] | keyword[def] identifier[get_args] ():
literal[string]
identifier[ap] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] )
identifier[ap] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
identifier[ap] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
identifier[ap] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[float] , identifier[help] = literal[string] )
identifier[ap] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] , identifier[default] = literal[string] )
keyword[return] identifier[ap] . identifier[parse_args] () | def get_args():
"""
Parse and return the command-line arguments.
"""
ap = argparse.ArgumentParser(description='Create frames for a movie that can be compiled using ffmpeg')
ap.add_argument('start', help='date string as start time')
ap.add_argument('end', help='date string as end time')
ap.add_argument('step', type=float, help='fraction of a day to step by')
ap.add_argument('--config', help='path to a config file', default='config.json')
return ap.parse_args() |
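An invocation sketch; the script name and argument values are placeholders:

# $ python make_frames.py 2024-01-01 2024-01-02 0.25 --config config.json
# parse_args() then yields start='2024-01-01', end='2024-01-02',
# step=0.25 (parsed as float), config='config.json'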
def add_config(self, config):
"""
:param config:
:type config: dict
"""
self.pre_configure()
self.config = config
if not self.has_revision_file():
#: Create new revision file.
touch_file(self.revfile_path)
self.history.load(self.revfile_path)
self.archiver.target_path = self.dest_path
self.archiver.zip_path = self.tmp_file_path
self.state.state_path = os.path.join(
REVISION_HOME,
"clients",
self.key
)
self.state.prepare()
self.post_configure()
self.prepared = True | def function[add_config, parameter[self, config]]:
constant[
:param config:
:type config: dict
]
call[name[self].pre_configure, parameter[]]
name[self].config assign[=] name[config]
if <ast.UnaryOp object at 0x7da18f58d780> begin[:]
call[name[touch_file], parameter[name[self].revfile_path]]
call[name[self].history.load, parameter[name[self].revfile_path]]
name[self].archiver.target_path assign[=] name[self].dest_path
name[self].archiver.zip_path assign[=] name[self].tmp_file_path
name[self].state.state_path assign[=] call[name[os].path.join, parameter[name[REVISION_HOME], constant[clients], name[self].key]]
call[name[self].state.prepare, parameter[]]
call[name[self].post_configure, parameter[]]
name[self].prepared assign[=] constant[True] | keyword[def] identifier[add_config] ( identifier[self] , identifier[config] ):
literal[string]
identifier[self] . identifier[pre_configure] ()
identifier[self] . identifier[config] = identifier[config]
keyword[if] keyword[not] identifier[self] . identifier[has_revision_file] ():
identifier[touch_file] ( identifier[self] . identifier[revfile_path] )
identifier[self] . identifier[history] . identifier[load] ( identifier[self] . identifier[revfile_path] )
identifier[self] . identifier[archiver] . identifier[target_path] = identifier[self] . identifier[dest_path]
identifier[self] . identifier[archiver] . identifier[zip_path] = identifier[self] . identifier[tmp_file_path]
identifier[self] . identifier[state] . identifier[state_path] = identifier[os] . identifier[path] . identifier[join] (
identifier[REVISION_HOME] ,
literal[string] ,
identifier[self] . identifier[key]
)
identifier[self] . identifier[state] . identifier[prepare] ()
identifier[self] . identifier[post_configure] ()
identifier[self] . identifier[prepared] = keyword[True] | def add_config(self, config):
"""
:param config:
:type config: dict
"""
self.pre_configure()
self.config = config
if not self.has_revision_file():
#: Create new revision file.
touch_file(self.revfile_path) # depends on [control=['if'], data=[]]
self.history.load(self.revfile_path)
self.archiver.target_path = self.dest_path
self.archiver.zip_path = self.tmp_file_path
self.state.state_path = os.path.join(REVISION_HOME, 'clients', self.key)
self.state.prepare()
self.post_configure()
self.prepared = True |
def listify(value):
"""
Wrap the given value into a list, with the below provisions:
* If the value is a list or a tuple, it's coerced into a new list.
* If the value is None, an empty list is returned.
* Otherwise, a single-element list is returned, containing the value.
:param value: A value.
:return: a list!
:rtype: list
"""
if value is None:
return []
if isinstance(value, (list, tuple)):
return list(value)
return [value] | def function[listify, parameter[value]]:
constant[
Wrap the given value into a list, with the below provisions:
* If the value is a list or a tuple, it's coerced into a new list.
* If the value is None, an empty list is returned.
* Otherwise, a single-element list is returned, containing the value.
:param value: A value.
:return: a list!
:rtype: list
]
if compare[name[value] is constant[None]] begin[:]
return[list[[]]]
if call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da1b26aebf0>, <ast.Name object at 0x7da1b26ac250>]]]] begin[:]
return[call[name[list], parameter[name[value]]]]
return[list[[<ast.Name object at 0x7da1b26ad4b0>]]] | keyword[def] identifier[listify] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] []
keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[tuple] )):
keyword[return] identifier[list] ( identifier[value] )
keyword[return] [ identifier[value] ] | def listify(value):
"""
Wrap the given value into a list, with the below provisions:
* If the value is a list or a tuple, it's coerced into a new list.
* If the value is None, an empty list is returned.
* Otherwise, a single-element list is returned, containing the value.
:param value: A value.
:return: a list!
:rtype: list
"""
if value is None:
return [] # depends on [control=['if'], data=[]]
if isinstance(value, (list, tuple)):
return list(value) # depends on [control=['if'], data=[]]
return [value] |
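Each branch exercised (the function above in scope); the `0` case highlights that the check is `is None`, not truthiness:

print(listify(None))        # []
print(listify(('a', 'b')))  # ['a', 'b'] -- tuple coerced to a new list
print(listify('abc'))       # ['abc']    -- strings are wrapped, not split
print(listify(0))           # [0]        -- falsy but not None, still wrapped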
def create_model(text_in,
labels,
timesteps,
per_example_weights,
phase=pt.Phase.train):
"""Creates a model for running baby names."""
with pt.defaults_scope(phase=phase, l2loss=0.00001):
# The embedding lookup must be placed on a cpu.
with tf.device('/cpu:0'):
embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
# We need to cleave the sequence because sequence lstm expect each
# timestep to be in its own Tensor.
lstm = (embedded.cleave_sequence(timesteps).sequence_lstm(CHARS))
# The classifier is much more efficient if it runs across the entire
# batch at once, so we want to squash (i.e. uncleave).
#
# The number of hidden nodes is set to 32 because it seems to work well.
return (lstm.squash_sequence().fully_connected(32,
activation_fn=tf.nn.relu)
.dropout(0.7)
.softmax_classifier(SEXES,
labels,
per_example_weights=per_example_weights)) | def function[create_model, parameter[text_in, labels, timesteps, per_example_weights, phase]]:
constant[Creates a model for running baby names.]
with call[name[pt].defaults_scope, parameter[]] begin[:]
with call[name[tf].device, parameter[constant[/cpu:0]]] begin[:]
variable[embedded] assign[=] call[name[text_in].embedding_lookup, parameter[name[CHARS], list[[<ast.Name object at 0x7da18eb554e0>]]]]
variable[lstm] assign[=] call[call[name[embedded].cleave_sequence, parameter[name[timesteps]]].sequence_lstm, parameter[name[CHARS]]]
return[call[call[call[call[name[lstm].squash_sequence, parameter[]].fully_connected, parameter[constant[32]]].dropout, parameter[constant[0.7]]].softmax_classifier, parameter[name[SEXES], name[labels]]]] | keyword[def] identifier[create_model] ( identifier[text_in] ,
identifier[labels] ,
identifier[timesteps] ,
identifier[per_example_weights] ,
identifier[phase] = identifier[pt] . identifier[Phase] . identifier[train] ):
literal[string]
keyword[with] identifier[pt] . identifier[defaults_scope] ( identifier[phase] = identifier[phase] , identifier[l2loss] = literal[int] ):
keyword[with] identifier[tf] . identifier[device] ( literal[string] ):
identifier[embedded] = identifier[text_in] . identifier[embedding_lookup] ( identifier[CHARS] ,[ identifier[EMBEDDING_SIZE] ])
identifier[lstm] =( identifier[embedded] . identifier[cleave_sequence] ( identifier[timesteps] ). identifier[sequence_lstm] ( identifier[CHARS] ))
keyword[return] ( identifier[lstm] . identifier[squash_sequence] (). identifier[fully_connected] ( literal[int] ,
identifier[activation_fn] = identifier[tf] . identifier[nn] . identifier[relu] )
. identifier[dropout] ( literal[int] )
. identifier[softmax_classifier] ( identifier[SEXES] ,
identifier[labels] ,
identifier[per_example_weights] = identifier[per_example_weights] )) | def create_model(text_in, labels, timesteps, per_example_weights, phase=pt.Phase.train):
"""Creates a model for running baby names."""
with pt.defaults_scope(phase=phase, l2loss=1e-05):
# The embedding lookup must be placed on a cpu.
with tf.device('/cpu:0'):
embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE]) # depends on [control=['with'], data=[]]
# We need to cleave the sequence because sequence lstm expect each
# timestep to be in its own Tensor.
lstm = embedded.cleave_sequence(timesteps).sequence_lstm(CHARS)
# The classifier is much more efficient if it runs across the entire
# batch at once, so we want to squash (i.e. uncleave).
#
# Hidden nodes is set to 32 because it seems to work well.
return lstm.squash_sequence().fully_connected(32, activation_fn=tf.nn.relu).dropout(0.7).softmax_classifier(SEXES, labels, per_example_weights=per_example_weights) # depends on [control=['with'], data=[]] |
def confirm_delete_view(self, request, object_id):
"""
Instantiates a class-based view to provide 'delete confirmation'
functionality for the assigned model, or redirect to Wagtail's delete
confirmation view if the assigned model extends 'Page'. The view class
used can be overridden by changing the 'confirm_delete_view_class'
attribute.
"""
kwargs = {'model_admin': self, 'object_id': object_id}
view_class = self.confirm_delete_view_class
return view_class.as_view(**kwargs)(request) | def function[confirm_delete_view, parameter[self, request, object_id]]:
constant[
Instantiates a class-based view to provide 'delete confirmation'
functionality for the assigned model, or redirect to Wagtail's delete
confirmation view if the assigned model extends 'Page'. The view class
used can be overridden by changing the 'confirm_delete_view_class'
attribute.
]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18f813760>, <ast.Constant object at 0x7da18f8126e0>], [<ast.Name object at 0x7da18f812860>, <ast.Name object at 0x7da18f812470>]]
variable[view_class] assign[=] name[self].confirm_delete_view_class
return[call[call[name[view_class].as_view, parameter[]], parameter[name[request]]]] | keyword[def] identifier[confirm_delete_view] ( identifier[self] , identifier[request] , identifier[object_id] ):
literal[string]
identifier[kwargs] ={ literal[string] : identifier[self] , literal[string] : identifier[object_id] }
identifier[view_class] = identifier[self] . identifier[confirm_delete_view_class]
keyword[return] identifier[view_class] . identifier[as_view] (** identifier[kwargs] )( identifier[request] ) | def confirm_delete_view(self, request, object_id):
"""
Instantiates a class-based view to provide 'delete confirmation'
functionality for the assigned model, or redirect to Wagtail's delete
confirmation view if the assigned model extends 'Page'. The view class
used can be overridden by changing the 'confirm_delete_view_class'
attribute.
"""
kwargs = {'model_admin': self, 'object_id': object_id}
view_class = self.confirm_delete_view_class
return view_class.as_view(**kwargs)(request) |
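
A minimal stand-in sketch of the dispatch pattern used above: `as_view(**kwargs)` returns a callable that instantiates the view class once per request. The class below is hypothetical, not Wagtail's real view.

class _StubConfirmDeleteView:
    # Hypothetical stand-in mirroring the as_view()/dispatch pattern.
    def __init__(self, model_admin, object_id):
        self.model_admin = model_admin
        self.object_id = object_id

    @classmethod
    def as_view(cls, **initkwargs):
        def view(request):
            return cls(**initkwargs).dispatch(request)
        return view

    def dispatch(self, request):
        return 'confirm delete of object %s' % self.object_id

print(_StubConfirmDeleteView.as_view(model_admin=None, object_id=7)('GET'))
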
def GetHeaders(cosmos_client,
default_headers,
verb,
path,
resource_id,
resource_type,
options,
partition_key_range_id = None):
"""Gets HTTP request headers.
:param cosmos_client.CosmosClient cosmos_client:
:param dict default_headers:
:param str verb:
:param str path:
:param str resource_id:
:param str resource_type:
:param dict options:
:param str partition_key_range_id:
:return:
The HTTP request headers.
:rtype: dict
"""
headers = dict(default_headers)
options = options or {}
if cosmos_client._useMultipleWriteLocations:
headers[http_constants.HttpHeaders.AllowTentativeWrites] = "true"
pre_trigger_include = options.get('preTriggerInclude')
if pre_trigger_include:
headers[http_constants.HttpHeaders.PreTriggerInclude] = (
pre_trigger_include
if isinstance(pre_trigger_include, str)
else (',').join(pre_trigger_include))
post_trigger_include = options.get('postTriggerInclude')
if post_trigger_include:
headers[http_constants.HttpHeaders.PostTriggerInclude] = (
post_trigger_include
if isinstance(post_trigger_include, str)
else (',').join(post_trigger_include))
if options.get('maxItemCount'):
headers[http_constants.HttpHeaders.PageSize] = options['maxItemCount']
access_condition = options.get('accessCondition')
if access_condition:
if access_condition['type'] == 'IfMatch':
headers[http_constants.HttpHeaders.IfMatch] = access_condition['condition']
else:
headers[http_constants.HttpHeaders.IfNoneMatch] = access_condition['condition']
if options.get('indexingDirective'):
headers[http_constants.HttpHeaders.IndexingDirective] = (
options['indexingDirective'])
consistency_level = None
''' get default client consistency level'''
default_client_consistency_level = headers.get(http_constants.HttpHeaders.ConsistencyLevel)
''' set consistency level. check if set via options, this will
override the default '''
if options.get('consistencyLevel'):
consistency_level = options['consistencyLevel']
headers[http_constants.HttpHeaders.ConsistencyLevel] = consistency_level
elif default_client_consistency_level is not None:
consistency_level = default_client_consistency_level
headers[http_constants.HttpHeaders.ConsistencyLevel] = consistency_level
# figure out if consistency level for this request is session
is_session_consistency = (consistency_level == documents.ConsistencyLevel.Session)
# set session token if required
if is_session_consistency is True and not IsMasterResource(resource_type):
# if there is a token set via option, then use it to override default
if options.get('sessionToken'):
headers[http_constants.HttpHeaders.SessionToken] = options['sessionToken']
else:
# check if the client's default consistency is session (and request consistency level is same),
# then update from session container
if default_client_consistency_level == documents.ConsistencyLevel.Session:
# populate session token from the client's session container
headers[http_constants.HttpHeaders.SessionToken] = (
cosmos_client.session.get_session_token(path))
if options.get('enableScanInQuery'):
headers[http_constants.HttpHeaders.EnableScanInQuery] = (
options['enableScanInQuery'])
if options.get('resourceTokenExpirySeconds'):
headers[http_constants.HttpHeaders.ResourceTokenExpiry] = (
options['resourceTokenExpirySeconds'])
if options.get('offerType'):
headers[http_constants.HttpHeaders.OfferType] = options['offerType']
if options.get('offerThroughput'):
headers[http_constants.HttpHeaders.OfferThroughput] = options['offerThroughput']
if 'partitionKey' in options:
# if partitionKey value is Undefined, serialize it as {} to be consistent with other SDKs
if options.get('partitionKey') is documents.Undefined:
headers[http_constants.HttpHeaders.PartitionKey] = [{}]
# else serialize using json dumps method which apart from regular values will serialize None into null
else:
headers[http_constants.HttpHeaders.PartitionKey] = json.dumps([options['partitionKey']])
if options.get('enableCrossPartitionQuery'):
headers[http_constants.HttpHeaders.EnableCrossPartitionQuery] = options['enableCrossPartitionQuery']
if options.get('populateQueryMetrics'):
headers[http_constants.HttpHeaders.PopulateQueryMetrics] = options['populateQueryMetrics']
if cosmos_client.master_key:
headers[http_constants.HttpHeaders.XDate] = (
datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'))
if cosmos_client.master_key or cosmos_client.resource_tokens:
authorization = auth.GetAuthorizationHeader(cosmos_client,
verb,
path,
resource_id,
IsNameBased(resource_id),
resource_type,
headers)
# urllib.quote throws when the input parameter is None
if authorization:
# -_.!~*'() are valid characters in url, and shouldn't be quoted.
authorization = urllib_quote(authorization, '-_.!~*\'()')
headers[http_constants.HttpHeaders.Authorization] = authorization
if verb == 'post' or verb == 'put':
if not headers.get(http_constants.HttpHeaders.ContentType):
headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.Json
if not headers.get(http_constants.HttpHeaders.Accept):
headers[http_constants.HttpHeaders.Accept] = runtime_constants.MediaTypes.Json
if partition_key_range_id is not None:
headers[http_constants.HttpHeaders.PartitionKeyRangeID] = partition_key_range_id
if options.get('enableScriptLogging'):
headers[http_constants.HttpHeaders.EnableScriptLogging] = options['enableScriptLogging']
if options.get('offerEnableRUPerMinuteThroughput'):
headers[http_constants.HttpHeaders.OfferIsRUPerMinuteThroughputEnabled] = options['offerEnableRUPerMinuteThroughput']
if options.get('disableRUPerMinuteUsage'):
headers[http_constants.HttpHeaders.DisableRUPerMinuteUsage] = options['disableRUPerMinuteUsage']
if options.get('changeFeed') is True:
# On REST level, change feed is using IfNoneMatch/ETag instead of continuation.
if_none_match_value = None
if options.get('continuation'):
if_none_match_value = options['continuation']
elif options.get('isStartFromBeginning') and options['isStartFromBeginning'] == False:
if_none_match_value = '*'
if if_none_match_value:
headers[http_constants.HttpHeaders.IfNoneMatch] = if_none_match_value
headers[http_constants.HttpHeaders.AIM] = http_constants.HttpHeaders.IncrementalFeedHeaderValue
else:
if options.get('continuation'):
headers[http_constants.HttpHeaders.Continuation] = (options['continuation'])
if options.get('populatePartitionKeyRangeStatistics'):
headers[http_constants.HttpHeaders.PopulatePartitionKeyRangeStatistics] = options['populatePartitionKeyRangeStatistics']
if options.get('populateQuotaInfo'):
headers[http_constants.HttpHeaders.PopulateQuotaInfo] = options['populateQuotaInfo']
return headers | def function[GetHeaders, parameter[cosmos_client, default_headers, verb, path, resource_id, resource_type, options, partition_key_range_id]]:
constant[Gets HTTP request headers.
:param cosmos_client.CosmosClient cosmos_client:
:param dict default_headers:
:param str verb:
:param str path:
:param str resource_id:
:param str resource_type:
:param dict options:
:param str partition_key_range_id:
:return:
The HTTP request headers.
:rtype: dict
]
variable[headers] assign[=] call[name[dict], parameter[name[default_headers]]]
variable[options] assign[=] <ast.BoolOp object at 0x7da1b17c3bb0>
if name[cosmos_client]._useMultipleWriteLocations begin[:]
call[name[headers]][name[http_constants].HttpHeaders.AllowTentativeWrites] assign[=] constant[true]
variable[pre_trigger_include] assign[=] call[name[options].get, parameter[constant[preTriggerInclude]]]
if name[pre_trigger_include] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PreTriggerInclude] assign[=] <ast.IfExp object at 0x7da1b17c1510>
variable[post_trigger_include] assign[=] call[name[options].get, parameter[constant[postTriggerInclude]]]
if name[post_trigger_include] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PostTriggerInclude] assign[=] <ast.IfExp object at 0x7da1b17c1990>
if call[name[options].get, parameter[constant[maxItemCount]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PageSize] assign[=] call[name[options]][constant[maxItemCount]]
variable[access_condition] assign[=] call[name[options].get, parameter[constant[accessCondition]]]
if name[access_condition] begin[:]
if compare[call[name[access_condition]][constant[type]] equal[==] constant[IfMatch]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.IfMatch] assign[=] call[name[access_condition]][constant[condition]]
if call[name[options].get, parameter[constant[indexingDirective]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.IndexingDirective] assign[=] call[name[options]][constant[indexingDirective]]
variable[consistency_level] assign[=] constant[None]
constant[ get default client consistency level]
variable[default_client_consistency_level] assign[=] call[name[headers].get, parameter[name[http_constants].HttpHeaders.ConsistencyLevel]]
constant[ set consistency level. check if set via options, this will
override the default ]
if call[name[options].get, parameter[constant[consistencyLevel]]] begin[:]
variable[consistency_level] assign[=] call[name[options]][constant[consistencyLevel]]
call[name[headers]][name[http_constants].HttpHeaders.ConsistencyLevel] assign[=] name[consistency_level]
variable[is_session_consistency] assign[=] compare[name[consistency_level] equal[==] name[documents].ConsistencyLevel.Session]
if <ast.BoolOp object at 0x7da1b17c2a10> begin[:]
if call[name[options].get, parameter[constant[sessionToken]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.SessionToken] assign[=] call[name[options]][constant[sessionToken]]
if call[name[options].get, parameter[constant[enableScanInQuery]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.EnableScanInQuery] assign[=] call[name[options]][constant[enableScanInQuery]]
if call[name[options].get, parameter[constant[resourceTokenExpirySeconds]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.ResourceTokenExpiry] assign[=] call[name[options]][constant[resourceTokenExpirySeconds]]
if call[name[options].get, parameter[constant[offerType]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.OfferType] assign[=] call[name[options]][constant[offerType]]
if call[name[options].get, parameter[constant[offerThroughput]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.OfferThroughput] assign[=] call[name[options]][constant[offerThroughput]]
if compare[constant[partitionKey] in name[options]] begin[:]
if compare[call[name[options].get, parameter[constant[partitionKey]]] is name[documents].Undefined] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PartitionKey] assign[=] list[[<ast.Dict object at 0x7da1b17032e0>]]
if call[name[options].get, parameter[constant[enableCrossPartitionQuery]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.EnableCrossPartitionQuery] assign[=] call[name[options]][constant[enableCrossPartitionQuery]]
if call[name[options].get, parameter[constant[populateQueryMetrics]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PopulateQueryMetrics] assign[=] call[name[options]][constant[populateQueryMetrics]]
if name[cosmos_client].master_key begin[:]
call[name[headers]][name[http_constants].HttpHeaders.XDate] assign[=] call[call[name[datetime].datetime.utcnow, parameter[]].strftime, parameter[constant[%a, %d %b %Y %H:%M:%S GMT]]]
if <ast.BoolOp object at 0x7da1b17df940> begin[:]
variable[authorization] assign[=] call[name[auth].GetAuthorizationHeader, parameter[name[cosmos_client], name[verb], name[path], name[resource_id], call[name[IsNameBased], parameter[name[resource_id]]], name[resource_type], name[headers]]]
if name[authorization] begin[:]
variable[authorization] assign[=] call[name[urllib_quote], parameter[name[authorization], constant[-_.!~*'()]]]
call[name[headers]][name[http_constants].HttpHeaders.Authorization] assign[=] name[authorization]
if <ast.BoolOp object at 0x7da1b17dea10> begin[:]
if <ast.UnaryOp object at 0x7da1b17dd4b0> begin[:]
call[name[headers]][name[http_constants].HttpHeaders.ContentType] assign[=] name[runtime_constants].MediaTypes.Json
if <ast.UnaryOp object at 0x7da1b17dfd90> begin[:]
call[name[headers]][name[http_constants].HttpHeaders.Accept] assign[=] name[runtime_constants].MediaTypes.Json
if compare[name[partition_key_range_id] is_not constant[None]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PartitionKeyRangeID] assign[=] name[partition_key_range_id]
if call[name[options].get, parameter[constant[enableScriptLogging]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.EnableScriptLogging] assign[=] call[name[options]][constant[enableScriptLogging]]
if call[name[options].get, parameter[constant[offerEnableRUPerMinuteThroughput]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.OfferIsRUPerMinuteThroughputEnabled] assign[=] call[name[options]][constant[offerEnableRUPerMinuteThroughput]]
if call[name[options].get, parameter[constant[disableRUPerMinuteUsage]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.DisableRUPerMinuteUsage] assign[=] call[name[options]][constant[disableRUPerMinuteUsage]]
if compare[call[name[options].get, parameter[constant[changeFeed]]] is constant[True]] begin[:]
variable[if_none_match_value] assign[=] constant[None]
if call[name[options].get, parameter[constant[continuation]]] begin[:]
variable[if_none_match_value] assign[=] call[name[options]][constant[continuation]]
if name[if_none_match_value] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.IfNoneMatch] assign[=] name[if_none_match_value]
call[name[headers]][name[http_constants].HttpHeaders.AIM] assign[=] name[http_constants].HttpHeaders.IncrementalFeedHeaderValue
if call[name[options].get, parameter[constant[populatePartitionKeyRangeStatistics]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PopulatePartitionKeyRangeStatistics] assign[=] call[name[options]][constant[populatePartitionKeyRangeStatistics]]
if call[name[options].get, parameter[constant[populateQuotaInfo]]] begin[:]
call[name[headers]][name[http_constants].HttpHeaders.PopulateQuotaInfo] assign[=] call[name[options]][constant[populateQuotaInfo]]
return[name[headers]] | keyword[def] identifier[GetHeaders] ( identifier[cosmos_client] ,
identifier[default_headers] ,
identifier[verb] ,
identifier[path] ,
identifier[resource_id] ,
identifier[resource_type] ,
identifier[options] ,
identifier[partition_key_range_id] = keyword[None] ):
literal[string]
identifier[headers] = identifier[dict] ( identifier[default_headers] )
identifier[options] = identifier[options] keyword[or] {}
keyword[if] identifier[cosmos_client] . identifier[_useMultipleWriteLocations] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[AllowTentativeWrites] ]= literal[string]
identifier[pre_trigger_include] = identifier[options] . identifier[get] ( literal[string] )
keyword[if] identifier[pre_trigger_include] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PreTriggerInclude] ]=(
identifier[pre_trigger_include]
keyword[if] identifier[isinstance] ( identifier[pre_trigger_include] , identifier[str] )
keyword[else] ( literal[string] ). identifier[join] ( identifier[pre_trigger_include] ))
identifier[post_trigger_include] = identifier[options] . identifier[get] ( literal[string] )
keyword[if] identifier[post_trigger_include] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PostTriggerInclude] ]=(
identifier[post_trigger_include]
keyword[if] identifier[isinstance] ( identifier[post_trigger_include] , identifier[str] )
keyword[else] ( literal[string] ). identifier[join] ( identifier[post_trigger_include] ))
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PageSize] ]= identifier[options] [ literal[string] ]
identifier[access_condition] = identifier[options] . identifier[get] ( literal[string] )
keyword[if] identifier[access_condition] :
keyword[if] identifier[access_condition] [ literal[string] ]== literal[string] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[IfMatch] ]= identifier[access_condition] [ literal[string] ]
keyword[else] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[IfNoneMatch] ]= identifier[access_condition] [ literal[string] ]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[IndexingDirective] ]=(
identifier[options] [ literal[string] ])
identifier[consistency_level] = keyword[None]
literal[string]
identifier[default_client_consistency_level] = identifier[headers] . identifier[get] ( identifier[http_constants] . identifier[HttpHeaders] . identifier[ConsistencyLevel] )
literal[string]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[consistency_level] = identifier[options] [ literal[string] ]
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[ConsistencyLevel] ]= identifier[consistency_level]
keyword[elif] identifier[default_client_consistency_level] keyword[is] keyword[not] keyword[None] :
identifier[consistency_level] = identifier[default_client_consistency_level]
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[ConsistencyLevel] ]= identifier[consistency_level]
identifier[is_session_consistency] =( identifier[consistency_level] == identifier[documents] . identifier[ConsistencyLevel] . identifier[Session] )
keyword[if] identifier[is_session_consistency] keyword[is] keyword[True] keyword[and] keyword[not] identifier[IsMasterResource] ( identifier[resource_type] ):
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[SessionToken] ]= identifier[options] [ literal[string] ]
keyword[else] :
keyword[if] identifier[default_client_consistency_level] == identifier[documents] . identifier[ConsistencyLevel] . identifier[Session] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[SessionToken] ]=(
identifier[cosmos_client] . identifier[session] . identifier[get_session_token] ( identifier[path] ))
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[EnableScanInQuery] ]=(
identifier[options] [ literal[string] ])
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[ResourceTokenExpiry] ]=(
identifier[options] [ literal[string] ])
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[OfferType] ]= identifier[options] [ literal[string] ]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[OfferThroughput] ]= identifier[options] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[options] :
keyword[if] identifier[options] . identifier[get] ( literal[string] ) keyword[is] identifier[documents] . identifier[Undefined] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PartitionKey] ]=[{}]
keyword[else] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PartitionKey] ]= identifier[json] . identifier[dumps] ([ identifier[options] [ literal[string] ]])
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[EnableCrossPartitionQuery] ]= identifier[options] [ literal[string] ]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PopulateQueryMetrics] ]= identifier[options] [ literal[string] ]
keyword[if] identifier[cosmos_client] . identifier[master_key] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[XDate] ]=(
identifier[datetime] . identifier[datetime] . identifier[utcnow] (). identifier[strftime] ( literal[string] ))
keyword[if] identifier[cosmos_client] . identifier[master_key] keyword[or] identifier[cosmos_client] . identifier[resource_tokens] :
identifier[authorization] = identifier[auth] . identifier[GetAuthorizationHeader] ( identifier[cosmos_client] ,
identifier[verb] ,
identifier[path] ,
identifier[resource_id] ,
identifier[IsNameBased] ( identifier[resource_id] ),
identifier[resource_type] ,
identifier[headers] )
keyword[if] identifier[authorization] :
identifier[authorization] = identifier[urllib_quote] ( identifier[authorization] , literal[string] )
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[Authorization] ]= identifier[authorization]
keyword[if] identifier[verb] == literal[string] keyword[or] identifier[verb] == literal[string] :
keyword[if] keyword[not] identifier[headers] . identifier[get] ( identifier[http_constants] . identifier[HttpHeaders] . identifier[ContentType] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[ContentType] ]= identifier[runtime_constants] . identifier[MediaTypes] . identifier[Json]
keyword[if] keyword[not] identifier[headers] . identifier[get] ( identifier[http_constants] . identifier[HttpHeaders] . identifier[Accept] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[Accept] ]= identifier[runtime_constants] . identifier[MediaTypes] . identifier[Json]
keyword[if] identifier[partition_key_range_id] keyword[is] keyword[not] keyword[None] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PartitionKeyRangeID] ]= identifier[partition_key_range_id]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[EnableScriptLogging] ]= identifier[options] [ literal[string] ]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[OfferIsRUPerMinuteThroughputEnabled] ]= identifier[options] [ literal[string] ]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[DisableRUPerMinuteUsage] ]= identifier[options] [ literal[string] ]
keyword[if] identifier[options] . identifier[get] ( literal[string] ) keyword[is] keyword[True] :
identifier[if_none_match_value] = keyword[None]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[if_none_match_value] = identifier[options] [ literal[string] ]
keyword[elif] identifier[options] . identifier[get] ( literal[string] ) keyword[and] identifier[options] [ literal[string] ]== keyword[False] :
identifier[if_none_match_value] = literal[string]
keyword[if] identifier[if_none_match_value] :
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[IfNoneMatch] ]= identifier[if_none_match_value]
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[AIM] ]= identifier[http_constants] . identifier[HttpHeaders] . identifier[IncrementalFeedHeaderValue]
keyword[else] :
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[Continuation] ]=( identifier[options] [ literal[string] ])
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PopulatePartitionKeyRangeStatistics] ]= identifier[options] [ literal[string] ]
keyword[if] identifier[options] . identifier[get] ( literal[string] ):
identifier[headers] [ identifier[http_constants] . identifier[HttpHeaders] . identifier[PopulateQuotaInfo] ]= identifier[options] [ literal[string] ]
keyword[return] identifier[headers] | def GetHeaders(cosmos_client, default_headers, verb, path, resource_id, resource_type, options, partition_key_range_id=None):
"""Gets HTTP request headers.
:param cosmos_client.CosmosClient cosmos_client:
:param dict default_headers:
:param str verb:
:param str path:
:param str resource_id:
:param str resource_type:
:param dict options:
:param str partition_key_range_id:
:return:
The HTTP request headers.
:rtype: dict
"""
headers = dict(default_headers)
options = options or {}
if cosmos_client._useMultipleWriteLocations:
headers[http_constants.HttpHeaders.AllowTentativeWrites] = 'true' # depends on [control=['if'], data=[]]
pre_trigger_include = options.get('preTriggerInclude')
if pre_trigger_include:
headers[http_constants.HttpHeaders.PreTriggerInclude] = pre_trigger_include if isinstance(pre_trigger_include, str) else ','.join(pre_trigger_include) # depends on [control=['if'], data=[]]
post_trigger_include = options.get('postTriggerInclude')
if post_trigger_include:
headers[http_constants.HttpHeaders.PostTriggerInclude] = post_trigger_include if isinstance(post_trigger_include, str) else ','.join(post_trigger_include) # depends on [control=['if'], data=[]]
if options.get('maxItemCount'):
headers[http_constants.HttpHeaders.PageSize] = options['maxItemCount'] # depends on [control=['if'], data=[]]
access_condition = options.get('accessCondition')
if access_condition:
if access_condition['type'] == 'IfMatch':
headers[http_constants.HttpHeaders.IfMatch] = access_condition['condition'] # depends on [control=['if'], data=[]]
else:
headers[http_constants.HttpHeaders.IfNoneMatch] = access_condition['condition'] # depends on [control=['if'], data=[]]
if options.get('indexingDirective'):
headers[http_constants.HttpHeaders.IndexingDirective] = options['indexingDirective'] # depends on [control=['if'], data=[]]
consistency_level = None
' get default client consistency level'
default_client_consistency_level = headers.get(http_constants.HttpHeaders.ConsistencyLevel)
' set consistency level. check if set via options, this will \n override the default '
if options.get('consistencyLevel'):
consistency_level = options['consistencyLevel']
headers[http_constants.HttpHeaders.ConsistencyLevel] = consistency_level # depends on [control=['if'], data=[]]
elif default_client_consistency_level is not None:
consistency_level = default_client_consistency_level
headers[http_constants.HttpHeaders.ConsistencyLevel] = consistency_level # depends on [control=['if'], data=['default_client_consistency_level']]
# figure out if consistency level for this request is session
is_session_consistency = consistency_level == documents.ConsistencyLevel.Session
# set session token if required
if is_session_consistency is True and (not IsMasterResource(resource_type)):
# if there is a token set via option, then use it to override default
if options.get('sessionToken'):
headers[http_constants.HttpHeaders.SessionToken] = options['sessionToken'] # depends on [control=['if'], data=[]] # check if the client's default consistency is session (and request consistency level is same),
# then update from session container
elif default_client_consistency_level == documents.ConsistencyLevel.Session:
# populate session token from the client's session container
headers[http_constants.HttpHeaders.SessionToken] = cosmos_client.session.get_session_token(path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if options.get('enableScanInQuery'):
headers[http_constants.HttpHeaders.EnableScanInQuery] = options['enableScanInQuery'] # depends on [control=['if'], data=[]]
if options.get('resourceTokenExpirySeconds'):
headers[http_constants.HttpHeaders.ResourceTokenExpiry] = options['resourceTokenExpirySeconds'] # depends on [control=['if'], data=[]]
if options.get('offerType'):
headers[http_constants.HttpHeaders.OfferType] = options['offerType'] # depends on [control=['if'], data=[]]
if options.get('offerThroughput'):
headers[http_constants.HttpHeaders.OfferThroughput] = options['offerThroughput'] # depends on [control=['if'], data=[]]
if 'partitionKey' in options:
# if partitionKey value is Undefined, serialize it as {} to be consistent with other SDKs
if options.get('partitionKey') is documents.Undefined:
headers[http_constants.HttpHeaders.PartitionKey] = [{}] # depends on [control=['if'], data=[]]
else:
# else serialize using json dumps method which apart from regular values will serialize None into null
headers[http_constants.HttpHeaders.PartitionKey] = json.dumps([options['partitionKey']]) # depends on [control=['if'], data=['options']]
if options.get('enableCrossPartitionQuery'):
headers[http_constants.HttpHeaders.EnableCrossPartitionQuery] = options['enableCrossPartitionQuery'] # depends on [control=['if'], data=[]]
if options.get('populateQueryMetrics'):
headers[http_constants.HttpHeaders.PopulateQueryMetrics] = options['populateQueryMetrics'] # depends on [control=['if'], data=[]]
if cosmos_client.master_key:
headers[http_constants.HttpHeaders.XDate] = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') # depends on [control=['if'], data=[]]
if cosmos_client.master_key or cosmos_client.resource_tokens:
authorization = auth.GetAuthorizationHeader(cosmos_client, verb, path, resource_id, IsNameBased(resource_id), resource_type, headers)
# urllib.quote throws when the input parameter is None
if authorization:
# -_.!~*'() are valid characters in url, and shouldn't be quoted.
authorization = urllib_quote(authorization, "-_.!~*'()") # depends on [control=['if'], data=[]]
headers[http_constants.HttpHeaders.Authorization] = authorization # depends on [control=['if'], data=[]]
if verb == 'post' or verb == 'put':
if not headers.get(http_constants.HttpHeaders.ContentType):
headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.Json # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not headers.get(http_constants.HttpHeaders.Accept):
headers[http_constants.HttpHeaders.Accept] = runtime_constants.MediaTypes.Json # depends on [control=['if'], data=[]]
if partition_key_range_id is not None:
headers[http_constants.HttpHeaders.PartitionKeyRangeID] = partition_key_range_id # depends on [control=['if'], data=['partition_key_range_id']]
if options.get('enableScriptLogging'):
headers[http_constants.HttpHeaders.EnableScriptLogging] = options['enableScriptLogging'] # depends on [control=['if'], data=[]]
if options.get('offerEnableRUPerMinuteThroughput'):
headers[http_constants.HttpHeaders.OfferIsRUPerMinuteThroughputEnabled] = options['offerEnableRUPerMinuteThroughput'] # depends on [control=['if'], data=[]]
if options.get('disableRUPerMinuteUsage'):
headers[http_constants.HttpHeaders.DisableRUPerMinuteUsage] = options['disableRUPerMinuteUsage'] # depends on [control=['if'], data=[]]
if options.get('changeFeed') is True:
# On REST level, change feed is using IfNoneMatch/ETag instead of continuation.
if_none_match_value = None
if options.get('continuation'):
if_none_match_value = options['continuation'] # depends on [control=['if'], data=[]]
elif options.get('isStartFromBeginning') and options['isStartFromBeginning'] == False:
if_none_match_value = '*' # depends on [control=['if'], data=[]]
if if_none_match_value:
headers[http_constants.HttpHeaders.IfNoneMatch] = if_none_match_value # depends on [control=['if'], data=[]]
headers[http_constants.HttpHeaders.AIM] = http_constants.HttpHeaders.IncrementalFeedHeaderValue # depends on [control=['if'], data=[]]
elif options.get('continuation'):
headers[http_constants.HttpHeaders.Continuation] = options['continuation'] # depends on [control=['if'], data=[]]
if options.get('populatePartitionKeyRangeStatistics'):
headers[http_constants.HttpHeaders.PopulatePartitionKeyRangeStatistics] = options['populatePartitionKeyRangeStatistics'] # depends on [control=['if'], data=[]]
if options.get('populateQuotaInfo'):
headers[http_constants.HttpHeaders.PopulateQuotaInfo] = options['populateQuotaInfo'] # depends on [control=['if'], data=[]]
return headers |
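
A standalone sketch of the partition-key rule above: `documents.Undefined` is serialized as `[{}]`, while any other value (including `None`) is JSON-encoded inside a one-element list. The `Undefined` sentinel below is a stand-in.

import json

Undefined = object()  # stand-in for documents.Undefined

def partition_key_header(value):
    # Mirrors the 'partitionKey' branch of GetHeaders above.
    return [{}] if value is Undefined else json.dumps([value])

print(partition_key_header(Undefined))   # [{}]
print(partition_key_header(None))        # [null]
print(partition_key_header('user42'))    # ["user42"]
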
def add_members(self, rtcs):
'''Add other RT Components to this composite component as members.
This component must be a composite component.
'''
if not self.is_composite:
raise exceptions.NotCompositeError(self.name)
for rtc in rtcs:
if self.is_member(rtc):
raise exceptions.AlreadyInCompositionError(self.name, rtc.instance_name)
org = self.organisations[0].obj
org.add_members([x.object for x in rtcs])
# Force a reparse of the member information
self._orgs = [] | def function[add_members, parameter[self, rtcs]]:
constant[Add other RT Components to this composite component as members.
This component must be a composite component.
]
if <ast.UnaryOp object at 0x7da18f09e1d0> begin[:]
<ast.Raise object at 0x7da18f09d600>
for taget[name[rtc]] in starred[name[rtcs]] begin[:]
if call[name[self].is_member, parameter[name[rtc]]] begin[:]
<ast.Raise object at 0x7da18f09d4e0>
variable[org] assign[=] call[name[self].organisations][constant[0]].obj
call[name[org].add_members, parameter[<ast.ListComp object at 0x7da18f09e830>]]
name[self]._orgs assign[=] list[[]] | keyword[def] identifier[add_members] ( identifier[self] , identifier[rtcs] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_composite] :
keyword[raise] identifier[exceptions] . identifier[NotCompositeError] ( identifier[self] . identifier[name] )
keyword[for] identifier[rtc] keyword[in] identifier[rtcs] :
keyword[if] identifier[self] . identifier[is_member] ( identifier[rtc] ):
keyword[raise] identifier[exceptions] . identifier[AlreadyInCompositionError] ( identifier[self] . identifier[name] , identifier[rtc] . identifier[instance_name] )
identifier[org] = identifier[self] . identifier[organisations] [ literal[int] ]. identifier[obj]
identifier[org] . identifier[add_members] ([ identifier[x] . identifier[object] keyword[for] identifier[x] keyword[in] identifier[rtcs] ])
identifier[self] . identifier[_orgs] =[] | def add_members(self, rtcs):
"""Add other RT Components to this composite component as members.
This component must be a composite component.
"""
if not self.is_composite:
raise exceptions.NotCompositeError(self.name) # depends on [control=['if'], data=[]]
for rtc in rtcs:
if self.is_member(rtc):
raise exceptions.AlreadyInCompositionError(self.name, rtc.instance_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rtc']]
org = self.organisations[0].obj
org.add_members([x.object for x in rtcs])
# Force a reparse of the member information
self._orgs = [] |
def every(minutes=NOTSET, seconds=NOTSET):
"""
the decorated method will be called every `minutes` minutes and `seconds` seconds
"""
def wrapper(func):
# mark the function with variable 'is_cronjob=True', the function would be
# collected into the list Handler._cron_jobs by meta class
func.is_cronjob = True
# collect interval and unify to seconds, it's used in meta class. See the
# comments in meta class.
func.tick = minutes * 60 + seconds
return func
if inspect.isfunction(minutes):
func = minutes
minutes = 1
seconds = 0
return wrapper(func)
if minutes is NOTSET:
if seconds is NOTSET:
minutes = 1
seconds = 0
else:
minutes = 0
if seconds is NOTSET:
seconds = 0
return wrapper | def function[every, parameter[minutes, seconds]]:
constant[
the decorated method will be called every `minutes` minutes and `seconds` seconds
]
def function[wrapper, parameter[func]]:
name[func].is_cronjob assign[=] constant[True]
name[func].tick assign[=] binary_operation[binary_operation[name[minutes] * constant[60]] + name[seconds]]
return[name[func]]
if call[name[inspect].isfunction, parameter[name[minutes]]] begin[:]
variable[func] assign[=] name[minutes]
variable[minutes] assign[=] constant[1]
variable[seconds] assign[=] constant[0]
return[call[name[wrapper], parameter[name[func]]]]
if compare[name[minutes] is name[NOTSET]] begin[:]
if compare[name[seconds] is name[NOTSET]] begin[:]
variable[minutes] assign[=] constant[1]
variable[seconds] assign[=] constant[0]
if compare[name[seconds] is name[NOTSET]] begin[:]
variable[seconds] assign[=] constant[0]
return[name[wrapper]] | keyword[def] identifier[every] ( identifier[minutes] = identifier[NOTSET] , identifier[seconds] = identifier[NOTSET] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[func] ):
identifier[func] . identifier[is_cronjob] = keyword[True]
identifier[func] . identifier[tick] = identifier[minutes] * literal[int] + identifier[seconds]
keyword[return] identifier[func]
keyword[if] identifier[inspect] . identifier[isfunction] ( identifier[minutes] ):
identifier[func] = identifier[minutes]
identifier[minutes] = literal[int]
identifier[seconds] = literal[int]
keyword[return] identifier[wrapper] ( identifier[func] )
keyword[if] identifier[minutes] keyword[is] identifier[NOTSET] :
keyword[if] identifier[seconds] keyword[is] identifier[NOTSET] :
identifier[minutes] = literal[int]
identifier[seconds] = literal[int]
keyword[else] :
identifier[minutes] = literal[int]
keyword[if] identifier[seconds] keyword[is] identifier[NOTSET] :
identifier[seconds] = literal[int]
keyword[return] identifier[wrapper] | def every(minutes=NOTSET, seconds=NOTSET):
"""
the decorated method will be called every `minutes` minutes and `seconds` seconds
"""
def wrapper(func):
# mark the function with variable 'is_cronjob=True', the function would be
# collected into the list Handler._cron_jobs by meta class
func.is_cronjob = True
# collect interval and unify to seconds, it's used in meta class. See the
# comments in meta class.
func.tick = minutes * 60 + seconds
return func
if inspect.isfunction(minutes):
func = minutes
minutes = 1
seconds = 0
return wrapper(func) # depends on [control=['if'], data=[]]
if minutes is NOTSET:
if seconds is NOTSET:
minutes = 1
seconds = 0 # depends on [control=['if'], data=['seconds']]
else:
minutes = 0 # depends on [control=['if'], data=['minutes', 'NOTSET']]
if seconds is NOTSET:
seconds = 0 # depends on [control=['if'], data=['seconds']]
return wrapper |
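
A short usage sketch of both decorator forms, assuming `every`, `NOTSET`, and `inspect` from this row are in scope; the handler functions are hypothetical.

@every(minutes=5, seconds=30)
def check_feeds():
    pass

assert check_feeds.is_cronjob and check_feeds.tick == 330  # 5*60 + 30

@every  # bare form: defaults to once per minute
def heartbeat():
    pass

assert heartbeat.tick == 60
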
def rate_unstable(self):
"""Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute."""
if not self.started or self.stalled:
return 0.0
x1, y1 = self._timing_data[-2]
x2, y2 = self._timing_data[-1]
return (y2 - y1) / (x2 - x1) | def function[rate_unstable, parameter[self]]:
constant[Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute.]
if <ast.BoolOp object at 0x7da18f810250> begin[:]
return[constant[0.0]]
<ast.Tuple object at 0x7da18f09dff0> assign[=] call[name[self]._timing_data][<ast.UnaryOp object at 0x7da18f09e8f0>]
<ast.Tuple object at 0x7da18f09fbe0> assign[=] call[name[self]._timing_data][<ast.UnaryOp object at 0x7da18f09c3a0>]
return[binary_operation[binary_operation[name[y2] - name[y1]] / binary_operation[name[x2] - name[x1]]]] | keyword[def] identifier[rate_unstable] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[started] keyword[or] identifier[self] . identifier[stalled] :
keyword[return] literal[int]
identifier[x1] , identifier[y1] = identifier[self] . identifier[_timing_data] [- literal[int] ]
identifier[x2] , identifier[y2] = identifier[self] . identifier[_timing_data] [- literal[int] ]
keyword[return] ( identifier[y2] - identifier[y1] )/( identifier[x2] - identifier[x1] ) | def rate_unstable(self):
"""Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute."""
if not self.started or self.stalled:
return 0.0 # depends on [control=['if'], data=[]]
(x1, y1) = self._timing_data[-2]
(x2, y2) = self._timing_data[-1]
return (y2 - y1) / (x2 - x1) |
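
A worked example of the two-point slope this method computes, assuming `_timing_data` holds `(elapsed_seconds, progress)` pairs:

timing_data = [(1.0, 10), (2.5, 40)]
(x1, y1), (x2, y2) = timing_data[-2], timing_data[-1]
print((y2 - y1) / (x2 - x1))  # 30 units over 1.5 s -> 20.0 per second
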
def run_oldstyle(self):
'''
Make the salt client call using the old-style all-in-one call method
'''
arg = [self._load_files(), self.opts['dest']]
local = salt.client.get_local_client(self.opts['conf_file'])
args = [self.opts['tgt'],
'cp.recv',
arg,
self.opts['timeout'],
]
selected_target_option = self.opts.get('selected_target_option', None)
if selected_target_option is not None:
args.append(selected_target_option)
return local.cmd(*args) | def function[run_oldstyle, parameter[self]]:
constant[
Make the salt client call using the old-style all-in-one call method
]
variable[arg] assign[=] list[[<ast.Call object at 0x7da1b215e530>, <ast.Subscript object at 0x7da1b215c340>]]
variable[local] assign[=] call[name[salt].client.get_local_client, parameter[call[name[self].opts][constant[conf_file]]]]
variable[args] assign[=] list[[<ast.Subscript object at 0x7da1b215c0a0>, <ast.Constant object at 0x7da1b215f3a0>, <ast.Name object at 0x7da1b215e050>, <ast.Subscript object at 0x7da1b215c820>]]
variable[selected_target_option] assign[=] call[name[self].opts.get, parameter[constant[selected_target_option], constant[None]]]
if compare[name[selected_target_option] is_not constant[None]] begin[:]
call[name[args].append, parameter[name[selected_target_option]]]
return[call[name[local].cmd, parameter[<ast.Starred object at 0x7da1b215cc40>]]] | keyword[def] identifier[run_oldstyle] ( identifier[self] ):
literal[string]
identifier[arg] =[ identifier[self] . identifier[_load_files] (), identifier[self] . identifier[opts] [ literal[string] ]]
identifier[local] = identifier[salt] . identifier[client] . identifier[get_local_client] ( identifier[self] . identifier[opts] [ literal[string] ])
identifier[args] =[ identifier[self] . identifier[opts] [ literal[string] ],
literal[string] ,
identifier[arg] ,
identifier[self] . identifier[opts] [ literal[string] ],
]
identifier[selected_target_option] = identifier[self] . identifier[opts] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[selected_target_option] keyword[is] keyword[not] keyword[None] :
identifier[args] . identifier[append] ( identifier[selected_target_option] )
keyword[return] identifier[local] . identifier[cmd] (* identifier[args] ) | def run_oldstyle(self):
"""
Make the salt client call in old-style all-in-one call method
"""
arg = [self._load_files(), self.opts['dest']]
local = salt.client.get_local_client(self.opts['conf_file'])
args = [self.opts['tgt'], 'cp.recv', arg, self.opts['timeout']]
selected_target_option = self.opts.get('selected_target_option', None)
if selected_target_option is not None:
args.append(selected_target_option) # depends on [control=['if'], data=['selected_target_option']]
return local.cmd(*args) |
def open_files(subseqs):
"""Open file statements."""
print(' . open_files')
lines = Lines()
lines.add(1, 'cpdef open_files(self, int idx):')
for seq in subseqs:
lines.add(2, 'if self._%s_diskflag:' % seq.name)
lines.add(3, 'self._%s_file = fopen(str(self._%s_path).encode(), '
'"rb+")' % (2*(seq.name,)))
if seq.NDIM == 0:
lines.add(3,
'fseek(self._%s_file, idx*8, SEEK_SET)' % seq.name)
else:
lines.add(3, 'fseek(self._%s_file, idx*self._%s_length*8, '
'SEEK_SET)' % (2*(seq.name,)))
return lines | def function[open_files, parameter[subseqs]]:
constant[Open file statements.]
call[name[print], parameter[constant[ . open_files]]]
variable[lines] assign[=] call[name[Lines], parameter[]]
call[name[lines].add, parameter[constant[1], constant[cpdef open_files(self, int idx):]]]
for taget[name[seq]] in starred[name[subseqs]] begin[:]
call[name[lines].add, parameter[constant[2], binary_operation[constant[if self._%s_diskflag:] <ast.Mod object at 0x7da2590d6920> name[seq].name]]]
call[name[lines].add, parameter[constant[3], binary_operation[constant[self._%s_file = fopen(str(self._%s_path).encode(), "rb+")] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[2] * tuple[[<ast.Attribute object at 0x7da1b0fedc90>]]]]]]
if compare[name[seq].NDIM equal[==] constant[0]] begin[:]
call[name[lines].add, parameter[constant[3], binary_operation[constant[fseek(self._%s_file, idx*8, SEEK_SET)] <ast.Mod object at 0x7da2590d6920> name[seq].name]]]
return[name[lines]] | keyword[def] identifier[open_files] ( identifier[subseqs] ):
literal[string]
identifier[print] ( literal[string] )
identifier[lines] = identifier[Lines] ()
identifier[lines] . identifier[add] ( literal[int] , literal[string] )
keyword[for] identifier[seq] keyword[in] identifier[subseqs] :
identifier[lines] . identifier[add] ( literal[int] , literal[string] % identifier[seq] . identifier[name] )
identifier[lines] . identifier[add] ( literal[int] , literal[string]
literal[string] %( literal[int] *( identifier[seq] . identifier[name] ,)))
keyword[if] identifier[seq] . identifier[NDIM] == literal[int] :
identifier[lines] . identifier[add] ( literal[int] ,
literal[string] % identifier[seq] . identifier[name] )
keyword[else] :
identifier[lines] . identifier[add] ( literal[int] , literal[string]
literal[string] %( literal[int] *( identifier[seq] . identifier[name] ,)))
keyword[return] identifier[lines] | def open_files(subseqs):
"""Open file statements."""
print(' . open_files')
lines = Lines()
lines.add(1, 'cpdef open_files(self, int idx):')
for seq in subseqs:
lines.add(2, 'if self._%s_diskflag:' % seq.name)
lines.add(3, 'self._%s_file = fopen(str(self._%s_path).encode(), "rb+")' % (2 * (seq.name,)))
if seq.NDIM == 0:
lines.add(3, 'fseek(self._%s_file, idx*8, SEEK_SET)' % seq.name) # depends on [control=['if'], data=[]]
else:
lines.add(3, 'fseek(self._%s_file, idx*self._%s_length*8, SEEK_SET)' % (2 * (seq.name,))) # depends on [control=['for'], data=['seq']]
return lines |
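
To see what this generator emits, here is a run with hypothetical stand-ins for HydPy's `Lines` container and sequence objects (the real classes carry more behaviour); it assumes `open_files` resolves `Lines` from the same namespace:

class Lines(list):
    # Minimal stand-in for HydPy's Lines container.
    def add(self, indent, line):
        self.append(' ' * 4 * indent + line)

class Seq:
    def __init__(self, name, ndim):
        self.name, self.NDIM = name, ndim

for line in open_files([Seq('t', 0), Seq('q', 1)]):
    print(line)
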
def _peek(self, n): # type: (int) -> str
"""
Peeks ahead n characters.
n is the max number of characters that will be peeked.
"""
# we always want to restore after exiting this scope
with self._state(restore=True):
buf = ""
for _ in range(n):
if self._current not in " \t\n\r#,]}":
buf += self._current
self.inc()
continue
break
return buf | def function[_peek, parameter[self, n]]:
constant[
Peeks ahead n characters.
n is the max number of characters that will be peeked.
]
with call[name[self]._state, parameter[]] begin[:]
variable[buf] assign[=] constant[]
for taget[name[_]] in starred[call[name[range], parameter[name[n]]]] begin[:]
if compare[name[self]._current <ast.NotIn object at 0x7da2590d7190> constant[
#,]}]] begin[:]
<ast.AugAssign object at 0x7da18bc73cd0>
call[name[self].inc, parameter[]]
continue
break
return[name[buf]] | keyword[def] identifier[_peek] ( identifier[self] , identifier[n] ):
literal[string]
keyword[with] identifier[self] . identifier[_state] ( identifier[restore] = keyword[True] ):
identifier[buf] = literal[string]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[n] ):
keyword[if] identifier[self] . identifier[_current] keyword[not] keyword[in] literal[string] :
identifier[buf] += identifier[self] . identifier[_current]
identifier[self] . identifier[inc] ()
keyword[continue]
keyword[break]
keyword[return] identifier[buf] | def _peek(self, n): # type: (int) -> str
'\n Peeks ahead n characters.\n\n n is the max number of characters that will be peeked.\n '
# we always want to restore after exiting this scope
with self._state(restore=True):
buf = ''
for _ in range(n):
if self._current not in ' \t\n\r#,]}':
buf += self._current
self.inc()
continue # depends on [control=['if'], data=[]]
break # depends on [control=['for'], data=[]]
return buf # depends on [control=['with'], data=[]] |
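
The stop-character rule is easier to see on a plain buffer; a standalone sketch of the same scan, with the parser state handling omitted:

def peek(src, n):
    # Collect up to n characters, stopping at whitespace or a
    # comment/closing delimiter, exactly as the loop above does.
    buf = ''
    for ch in src[:n]:
        if ch in ' \t\n\r#,]}':
            break
        buf += ch
    return buf

assert peek('true, false', 10) == 'true'
assert peek('12345', 3) == '123'
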
def reset_to_last_commit():
"""reset a modified file to his last commit status
This method does the same than a ::
$ git reset --hard
Keyword Arguments:
<none>
Returns:
<nothing>
"""
try:
repo = Repo()
gitcmd = repo.git
gitcmd.reset(hard=True)
except Exception:
pass | def function[reset_to_last_commit, parameter[]]:
constant[reset a modified file to its last commit status
This method does the same as a ::
$ git reset --hard
Keyword Arguments:
<none>
Returns:
<nothing>
]
<ast.Try object at 0x7da1b28fdcf0> | keyword[def] identifier[reset_to_last_commit] ():
literal[string]
keyword[try] :
identifier[repo] = identifier[Repo] ()
identifier[gitcmd] = identifier[repo] . identifier[git]
identifier[gitcmd] . identifier[reset] ( identifier[hard] = keyword[True] )
keyword[except] identifier[Exception] :
keyword[pass] | def reset_to_last_commit():
"""reset a modified file to his last commit status
This method does the same than a ::
$ git reset --hard
Keyword Arguments:
<none>
Returns:
<nothing>
"""
try:
repo = Repo()
gitcmd = repo.git
gitcmd.reset(hard=True) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] |
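
A usage sketch; note that the broad `except Exception: pass` means any failure (for example, running outside a Git work tree) is silently ignored:

# Assumes GitPython's Repo is imported as in this row and the current
# working directory is inside a repository.
reset_to_last_commit()  # equivalent to: git reset --hard
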
def Prep(self, size, additionalBytes):
"""
Prep prepares to write an element of `size` after `additionalBytes`
have been written, e.g. if you write a string, you need to align
such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
"""
# Track the biggest thing we've ever aligned to.
if size > self.minalign:
self.minalign = size
# Find the amount of alignment needed such that `size` is properly
# aligned after `additionalBytes`:
alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
alignSize &= (size - 1)
# Reallocate the buffer if needed:
while self.Head() < alignSize+size+additionalBytes:
oldBufSize = len(self.Bytes)
self.growByteBuffer()
updated_head = self.head + len(self.Bytes) - oldBufSize
self.head = UOffsetTFlags.py_type(updated_head)
self.Pad(alignSize) | def function[Prep, parameter[self, size, additionalBytes]]:
constant[
Prep prepares to write an element of `size` after `additionalBytes`
have been written, e.g. if you write a string, you need to align
such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
]
if compare[name[size] greater[>] name[self].minalign] begin[:]
name[self].minalign assign[=] name[size]
variable[alignSize] assign[=] binary_operation[<ast.UnaryOp object at 0x7da20e961f30> + constant[1]]
<ast.AugAssign object at 0x7da20e960610>
while compare[call[name[self].Head, parameter[]] less[<] binary_operation[binary_operation[name[alignSize] + name[size]] + name[additionalBytes]]] begin[:]
variable[oldBufSize] assign[=] call[name[len], parameter[name[self].Bytes]]
call[name[self].growByteBuffer, parameter[]]
variable[updated_head] assign[=] binary_operation[binary_operation[name[self].head + call[name[len], parameter[name[self].Bytes]]] - name[oldBufSize]]
name[self].head assign[=] call[name[UOffsetTFlags].py_type, parameter[name[updated_head]]]
call[name[self].Pad, parameter[name[alignSize]]] | keyword[def] identifier[Prep] ( identifier[self] , identifier[size] , identifier[additionalBytes] ):
literal[string]
keyword[if] identifier[size] > identifier[self] . identifier[minalign] :
identifier[self] . identifier[minalign] = identifier[size]
identifier[alignSize] =(~( identifier[len] ( identifier[self] . identifier[Bytes] )- identifier[self] . identifier[Head] ()+ identifier[additionalBytes] ))+ literal[int]
identifier[alignSize] &=( identifier[size] - literal[int] )
keyword[while] identifier[self] . identifier[Head] ()< identifier[alignSize] + identifier[size] + identifier[additionalBytes] :
identifier[oldBufSize] = identifier[len] ( identifier[self] . identifier[Bytes] )
identifier[self] . identifier[growByteBuffer] ()
identifier[updated_head] = identifier[self] . identifier[head] + identifier[len] ( identifier[self] . identifier[Bytes] )- identifier[oldBufSize]
identifier[self] . identifier[head] = identifier[UOffsetTFlags] . identifier[py_type] ( identifier[updated_head] )
identifier[self] . identifier[Pad] ( identifier[alignSize] ) | def Prep(self, size, additionalBytes):
"""
Prep prepares to write an element of `size` after `additionalBytes`
have been written, e.g. if you write a string, you need to align
such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
"""
# Track the biggest thing we've ever aligned to.
if size > self.minalign:
self.minalign = size # depends on [control=['if'], data=['size']]
# Find the amount of alignment needed such that `size` is properly
# aligned after `additionalBytes`:
alignSize = ~(len(self.Bytes) - self.Head() + additionalBytes) + 1
alignSize &= size - 1
# Reallocate the buffer if needed:
while self.Head() < alignSize + size + additionalBytes:
oldBufSize = len(self.Bytes)
self.growByteBuffer()
updated_head = self.head + len(self.Bytes) - oldBufSize
self.head = UOffsetTFlags.py_type(updated_head) # depends on [control=['while'], data=[]]
self.Pad(alignSize) |
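
A worked example of the alignment arithmetic, with hypothetical numbers: a 16-byte buffer, head at 3 (13 bytes already written from the back), one additional byte, and a 4-byte field:

buf_len, head, additional, size = 16, 3, 1, 4
align = (~(buf_len - head + additional) + 1) & (size - 1)
print(align)  # 2: 13 written + 1 extra + 2 pad = 16, a multiple of 4
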
def get_sections_2d(self):
"""Get 2-D list of sections and hdrgos sets actually used in grouping."""
sections_hdrgos_act = []
hdrgos_act_all = self.get_hdrgos() # Header GOs actually used to group
hdrgos_act_secs = set()
if self.hdrobj.sections:
for section_name, hdrgos_all_lst in self.hdrobj.sections:
# print("GGGGGGGGGGGGGGGGG {N:3} {NAME}".format(N=len(hdrgos_all_lst), NAME=section_name))
hdrgos_all_set = set(hdrgos_all_lst)
hdrgos_act_set = hdrgos_all_set.intersection(hdrgos_act_all)
if hdrgos_act_set:
hdrgos_act_secs |= hdrgos_act_set
# Use original order of header GOs found in sections
hdrgos_act_lst = []
hdrgos_act_ctr = cx.Counter()
for hdrgo_p in hdrgos_all_lst: # Header GO that may or may not be used.
if hdrgo_p in hdrgos_act_set and hdrgos_act_ctr[hdrgo_p] == 0:
hdrgos_act_lst.append(hdrgo_p)
hdrgos_act_ctr[hdrgo_p] += 1
sections_hdrgos_act.append((section_name, hdrgos_act_lst))
# print(">>>>>>>>>>>>>>> hdrgos_act_all {N:3}".format(N=len(hdrgos_act_all)))
# print(">>>>>>>>>>>>>>> hdrgos_act_secs {N:3}".format(N=len(hdrgos_act_secs)))
hdrgos_act_rem = hdrgos_act_all.difference(hdrgos_act_secs)
if hdrgos_act_rem:
# print("RRRRRRRRRRR {N:3}".format(N=len(hdrgos_act_rem)))
sections_hdrgos_act.append((self.hdrobj.secdflt, hdrgos_act_rem))
else:
sections_hdrgos_act.append((self.hdrobj.secdflt, hdrgos_act_all))
return sections_hdrgos_act | def function[get_sections_2d, parameter[self]]:
constant[Get 2-D list of sections and hdrgos sets actually used in grouping.]
variable[sections_hdrgos_act] assign[=] list[[]]
variable[hdrgos_act_all] assign[=] call[name[self].get_hdrgos, parameter[]]
variable[hdrgos_act_secs] assign[=] call[name[set], parameter[]]
if name[self].hdrobj.sections begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f812290>, <ast.Name object at 0x7da18f8129b0>]]] in starred[name[self].hdrobj.sections] begin[:]
variable[hdrgos_all_set] assign[=] call[name[set], parameter[name[hdrgos_all_lst]]]
variable[hdrgos_act_set] assign[=] call[name[hdrgos_all_set].intersection, parameter[name[hdrgos_act_all]]]
if name[hdrgos_act_set] begin[:]
<ast.AugAssign object at 0x7da18f8121d0>
variable[hdrgos_act_lst] assign[=] list[[]]
variable[hdrgos_act_ctr] assign[=] call[name[cx].Counter, parameter[]]
for taget[name[hdrgo_p]] in starred[name[hdrgos_all_lst]] begin[:]
if <ast.BoolOp object at 0x7da18f8103a0> begin[:]
call[name[hdrgos_act_lst].append, parameter[name[hdrgo_p]]]
<ast.AugAssign object at 0x7da18f811840>
call[name[sections_hdrgos_act].append, parameter[tuple[[<ast.Name object at 0x7da18f813310>, <ast.Name object at 0x7da18f811de0>]]]]
variable[hdrgos_act_rem] assign[=] call[name[hdrgos_act_all].difference, parameter[name[hdrgos_act_secs]]]
if name[hdrgos_act_rem] begin[:]
call[name[sections_hdrgos_act].append, parameter[tuple[[<ast.Attribute object at 0x7da18f811000>, <ast.Name object at 0x7da18f813b20>]]]]
return[name[sections_hdrgos_act]] | keyword[def] identifier[get_sections_2d] ( identifier[self] ):
literal[string]
identifier[sections_hdrgos_act] =[]
identifier[hdrgos_act_all] = identifier[self] . identifier[get_hdrgos] ()
identifier[hdrgos_act_secs] = identifier[set] ()
keyword[if] identifier[self] . identifier[hdrobj] . identifier[sections] :
keyword[for] identifier[section_name] , identifier[hdrgos_all_lst] keyword[in] identifier[self] . identifier[hdrobj] . identifier[sections] :
identifier[hdrgos_all_set] = identifier[set] ( identifier[hdrgos_all_lst] )
identifier[hdrgos_act_set] = identifier[hdrgos_all_set] . identifier[intersection] ( identifier[hdrgos_act_all] )
keyword[if] identifier[hdrgos_act_set] :
identifier[hdrgos_act_secs] |= identifier[hdrgos_act_set]
identifier[hdrgos_act_lst] =[]
identifier[hdrgos_act_ctr] = identifier[cx] . identifier[Counter] ()
keyword[for] identifier[hdrgo_p] keyword[in] identifier[hdrgos_all_lst] :
keyword[if] identifier[hdrgo_p] keyword[in] identifier[hdrgos_act_set] keyword[and] identifier[hdrgos_act_ctr] [ identifier[hdrgo_p] ]== literal[int] :
identifier[hdrgos_act_lst] . identifier[append] ( identifier[hdrgo_p] )
identifier[hdrgos_act_ctr] [ identifier[hdrgo_p] ]+= literal[int]
identifier[sections_hdrgos_act] . identifier[append] (( identifier[section_name] , identifier[hdrgos_act_lst] ))
identifier[hdrgos_act_rem] = identifier[hdrgos_act_all] . identifier[difference] ( identifier[hdrgos_act_secs] )
keyword[if] identifier[hdrgos_act_rem] :
identifier[sections_hdrgos_act] . identifier[append] (( identifier[self] . identifier[hdrobj] . identifier[secdflt] , identifier[hdrgos_act_rem] ))
keyword[else] :
identifier[sections_hdrgos_act] . identifier[append] (( identifier[self] . identifier[hdrobj] . identifier[secdflt] , identifier[hdrgos_act_all] ))
keyword[return] identifier[sections_hdrgos_act] | def get_sections_2d(self):
"""Get 2-D list of sections and hdrgos sets actually used in grouping."""
sections_hdrgos_act = []
hdrgos_act_all = self.get_hdrgos() # Header GOs actually used to group
hdrgos_act_secs = set()
if self.hdrobj.sections:
for (section_name, hdrgos_all_lst) in self.hdrobj.sections:
# print("GGGGGGGGGGGGGGGGG {N:3} {NAME}".format(N=len(hdrgos_all_lst), NAME=section_name))
hdrgos_all_set = set(hdrgos_all_lst)
hdrgos_act_set = hdrgos_all_set.intersection(hdrgos_act_all)
if hdrgos_act_set:
hdrgos_act_secs |= hdrgos_act_set
# Use original order of header GOs found in sections
hdrgos_act_lst = []
hdrgos_act_ctr = cx.Counter()
for hdrgo_p in hdrgos_all_lst: # Header GO that may or may not be used.
if hdrgo_p in hdrgos_act_set and hdrgos_act_ctr[hdrgo_p] == 0:
hdrgos_act_lst.append(hdrgo_p) # depends on [control=['if'], data=[]]
hdrgos_act_ctr[hdrgo_p] += 1 # depends on [control=['for'], data=['hdrgo_p']]
sections_hdrgos_act.append((section_name, hdrgos_act_lst)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# print(">>>>>>>>>>>>>>> hdrgos_act_all {N:3}".format(N=len(hdrgos_act_all)))
# print(">>>>>>>>>>>>>>> hdrgos_act_secs {N:3}".format(N=len(hdrgos_act_secs)))
hdrgos_act_rem = hdrgos_act_all.difference(hdrgos_act_secs)
if hdrgos_act_rem:
# print("RRRRRRRRRRR {N:3}".format(N=len(hdrgos_act_rem)))
sections_hdrgos_act.append((self.hdrobj.secdflt, hdrgos_act_rem)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
sections_hdrgos_act.append((self.hdrobj.secdflt, hdrgos_act_all))
return sections_hdrgos_act |
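The heart of get_sections_2d is an order-preserving, de-duplicating intersection. A self-contained sketch of just that inner pattern; the section contents and "actually used" set below are invented:

import collections as cx

hdrgos_all_lst = ['GO:A', 'GO:B', 'GO:A', 'GO:C']  # section order, may repeat
hdrgos_act_all = {'GO:C', 'GO:A'}                  # header GOs actually used

hdrgos_act_set = set(hdrgos_all_lst).intersection(hdrgos_act_all)
hdrgos_act_lst = []
hdrgos_act_ctr = cx.Counter()
for hdrgo in hdrgos_all_lst:  # keep the section's original order
    if hdrgo in hdrgos_act_set and hdrgos_act_ctr[hdrgo] == 0:
        hdrgos_act_lst.append(hdrgo)
    hdrgos_act_ctr[hdrgo] += 1
print(hdrgos_act_lst)  # -> ['GO:A', 'GO:C']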
def distinct(self):
"""
Only return distinct rows.
Return a new query set with the distinct mark set
"""
new_query_set = self.clone()
new_query_set.query.distinct = True
return new_query_set | def function[distinct, parameter[self]]:
constant[
Only return distinct rows.
Return a new query set with the distinct mark set
]
variable[new_query_set] assign[=] call[name[self].clone, parameter[]]
name[new_query_set].query.distinct assign[=] constant[True]
return[name[new_query_set]] | keyword[def] identifier[distinct] ( identifier[self] ):
literal[string]
identifier[new_query_set] = identifier[self] . identifier[clone] ()
identifier[new_query_set] . identifier[query] . identifier[distinct] = keyword[True]
keyword[return] identifier[new_query_set] | def distinct(self):
"""
Only return distinct rows.
Return a new query set with the distinct mark set
"""
new_query_set = self.clone()
new_query_set.query.distinct = True
return new_query_set |
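distinct follows the clone-and-flag query-set idiom: the receiver is never mutated. A toy harness with stand-in Query/clone internals (the real ones are not part of this record):

import copy

class QuerySet(object):
    def __init__(self):
        self.query = type('Query', (), {'distinct': False})()

    def clone(self):
        return copy.deepcopy(self)

    def distinct(self):
        new_query_set = self.clone()
        new_query_set.query.distinct = True
        return new_query_set

qs = QuerySet()
qs2 = qs.distinct()
print(qs.query.distinct, qs2.query.distinct)  # -> False True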
def setup(cls):
"""
Main method to auto setup Apphook
It must be called after the Apphook registration::
apphook_pool.register(MyApp)
MyApp.setup()
"""
try:
if cls.auto_setup and cls.auto_setup.get('enabled', False):
if not cls.auto_setup.get('home title', False):
warnings.warn(
'"home title" is not set in {0}.auto_setup attribute'.format(cls)
)
return
if not cls.auto_setup.get('page title', False):
warnings.warn(
'"page title" is not set in {0}.auto_setup attribute'.format(cls)
)
return
if cls.app_name and not cls.auto_setup.get('namespace', False):
warnings.warn(
'"page title" is not set in {0}.auto_setup attribute'.format(cls)
)
return
config = None
cls._setup_pages(config)
except Exception:
# Ignore any error during setup. Worst case: pages are not created, but the instance
# won't break
pass | def function[setup, parameter[cls]]:
constant[
Main method to auto setup Apphook
It must be called after the Apphook registration::
apphook_pool.register(MyApp)
MyApp.setup()
]
<ast.Try object at 0x7da1b1fa9ea0> | keyword[def] identifier[setup] ( identifier[cls] ):
literal[string]
keyword[try] :
keyword[if] identifier[cls] . identifier[auto_setup] keyword[and] identifier[cls] . identifier[auto_setup] . identifier[get] ( literal[string] , keyword[False] ):
keyword[if] keyword[not] identifier[cls] . identifier[auto_setup] . identifier[get] ( literal[string] , keyword[False] ):
identifier[warnings] . identifier[warn] (
literal[string] . identifier[format] ( identifier[cls] )
)
keyword[return]
keyword[if] keyword[not] identifier[cls] . identifier[auto_setup] . identifier[get] ( literal[string] , keyword[False] ):
identifier[warnings] . identifier[warn] (
literal[string] . identifier[format] ( identifier[cls] )
)
keyword[return]
keyword[if] identifier[cls] . identifier[app_name] keyword[and] keyword[not] identifier[cls] . identifier[auto_setup] . identifier[get] ( literal[string] , keyword[False] ):
identifier[warnings] . identifier[warn] (
literal[string] . identifier[format] ( identifier[cls] )
)
keyword[return]
identifier[config] = keyword[None]
identifier[cls] . identifier[_setup_pages] ( identifier[config] )
keyword[except] identifier[Exception] :
keyword[pass] | def setup(cls):
"""
Main method to auto setup Apphook
It must be called after the Apphook registration::
apphook_pool.register(MyApp)
MyApp.setup()
"""
try:
if cls.auto_setup and cls.auto_setup.get('enabled', False):
if not cls.auto_setup.get('home title', False):
warnings.warn('"home title" is not set in {0}.auto_setup attribute'.format(cls))
return # depends on [control=['if'], data=[]]
if not cls.auto_setup.get('page title', False):
warnings.warn('"page title" is not set in {0}.auto_setup attribute'.format(cls))
return # depends on [control=['if'], data=[]]
if cls.app_name and (not cls.auto_setup.get('namespace', False)):
warnings.warn('"page title" is not set in {0}.auto_setup attribute'.format(cls))
return # depends on [control=['if'], data=[]]
config = None
cls._setup_pages(config) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
# Ignore any error during setup. Worst case: pages are not created, but the instance
# won't break
pass # depends on [control=['except'], data=[]] |
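For reference, an auto_setup mapping that passes all three checks above could look like this; the key names come straight from the get() calls, the values are illustrative:

auto_setup = {
    'enabled': True,         # gate for the whole routine
    'home title': 'Home',    # checked first
    'page title': 'My App',  # checked second
    'namespace': 'myapp',    # only required when cls.app_name is set
}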
def qvalues1(PV,m=None,pi=1.0):
"""estimate q vlaues from a list of Pvalues
this algorihm is taken from Storey, significance testing for genomic ...
m: number of tests, (if not len(PV)), pi: fraction of expected true null (1.0 is a conservative estimate)
@param PV: pvalues
@param m: total number of tests if PV is not the entire array.
@param pi: fraction of true null
"""
S = PV.shape
PV = PV.flatten()
if m is None:
m = len(PV) * 1.0
else:
m*=1.0
lPV = len(PV)
#1. sort pvalues
PV = PV.squeeze()
IPV = PV.argsort()
PV = PV[IPV]
#2. estimate lambda
if pi is None:
lrange = sp.linspace(0.05,0.95,int(max(lPV/100.0,10)))  # linspace needs an integer count
pil = sp.double((PV[:,sp.newaxis]>lrange).sum(axis=0))/lPV
pilr = pil/(1.0-lrange)
#ok, I think for SNPs this is pretty useless, pi is close to 1!
pi =1.0
#if there is something useful in there, use the estimate closest to 1
if pilr[-1]<1.0:
pi = pilr[-1]
#3. initialise q values
QV_ = pi * m/lPV* PV
QV_[-1] = min(QV_[-1],1.0)
#4. update estimate
for i in range(lPV-2,-1,-1):
QV_[i] = min(pi*m*PV[i]/(i+1.0),QV_[i+1])
#5. invert sorting
QV = sp.zeros_like(PV)
QV[IPV] = QV_
QV = QV.reshape(S)
return QV | def function[qvalues1, parameter[PV, m, pi]]:
constant[estimate q values from a list of p-values
this algorithm is taken from Storey, significance testing for genomic ...
m: number of tests (if not len(PV)); pi: fraction of expected true nulls (1.0 is a conservative estimate)
@param PV: p-values
@param m: total number of tests if PV is not the entire array.
@param pi: fraction of true nulls
]
variable[S] assign[=] name[PV].shape
variable[PV] assign[=] call[name[PV].flatten, parameter[]]
if compare[name[m] is constant[None]] begin[:]
variable[m] assign[=] binary_operation[call[name[len], parameter[name[PV]]] * constant[1.0]]
variable[lPV] assign[=] call[name[len], parameter[name[PV]]]
variable[PV] assign[=] call[name[PV].squeeze, parameter[]]
variable[IPV] assign[=] call[name[PV].argsort, parameter[]]
variable[PV] assign[=] call[name[PV]][name[IPV]]
if compare[name[pi] is constant[None]] begin[:]
variable[lrange] assign[=] call[name[sp].linspace, parameter[constant[0.05], constant[0.95], call[name[max], parameter[binary_operation[name[lPV] / constant[100.0]], constant[10]]]]]
variable[pil] assign[=] binary_operation[call[name[sp].double, parameter[call[compare[call[name[PV]][tuple[[<ast.Slice object at 0x7da18f813be0>, <ast.Attribute object at 0x7da18f8137f0>]]] greater[>] name[lrange]].sum, parameter[]]]] / name[lPV]]
variable[pilr] assign[=] binary_operation[name[pil] / binary_operation[constant[1.0] - name[lrange]]]
variable[pi] assign[=] constant[1.0]
if compare[call[name[pilr]][<ast.UnaryOp object at 0x7da18f8110f0>] less[<] constant[1.0]] begin[:]
variable[pi] assign[=] call[name[pilr]][<ast.UnaryOp object at 0x7da18f8104f0>]
variable[QV_] assign[=] binary_operation[binary_operation[binary_operation[name[pi] * name[m]] / name[lPV]] * name[PV]]
call[name[QV_]][<ast.UnaryOp object at 0x7da18f811b40>] assign[=] call[name[min], parameter[call[name[QV_]][<ast.UnaryOp object at 0x7da18f813400>], constant[1.0]]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[lPV] - constant[2]], <ast.UnaryOp object at 0x7da18f811840>, <ast.UnaryOp object at 0x7da18f8110c0>]]] begin[:]
call[name[QV_]][name[i]] assign[=] call[name[min], parameter[binary_operation[binary_operation[binary_operation[name[pi] * name[m]] * call[name[PV]][name[i]]] / binary_operation[name[i] + constant[1.0]]], call[name[QV_]][binary_operation[name[i] + constant[1]]]]]
variable[QV] assign[=] call[name[sp].zeros_like, parameter[name[PV]]]
call[name[QV]][name[IPV]] assign[=] name[QV_]
variable[QV] assign[=] call[name[QV].reshape, parameter[name[S]]]
return[name[QV]] | keyword[def] identifier[qvalues1] ( identifier[PV] , identifier[m] = keyword[None] , identifier[pi] = literal[int] ):
literal[string]
identifier[S] = identifier[PV] . identifier[shape]
identifier[PV] = identifier[PV] . identifier[flatten] ()
keyword[if] identifier[m] keyword[is] keyword[None] :
identifier[m] = identifier[len] ( identifier[PV] )* literal[int]
keyword[else] :
identifier[m] *= literal[int]
identifier[lPV] = identifier[len] ( identifier[PV] )
identifier[PV] = identifier[PV] . identifier[squeeze] ()
identifier[IPV] = identifier[PV] . identifier[argsort] ()
identifier[PV] = identifier[PV] [ identifier[IPV] ]
keyword[if] identifier[pi] keyword[is] keyword[None] :
identifier[lrange] = identifier[sp] . identifier[linspace] ( literal[int] , literal[int] , identifier[max] ( identifier[lPV] / literal[int] , literal[int] ))
identifier[pil] = identifier[sp] . identifier[double] (( identifier[PV] [:, identifier[sp] . identifier[newaxis] ]> identifier[lrange] ). identifier[sum] ( identifier[axis] = literal[int] ))/ identifier[lPV]
identifier[pilr] = identifier[pil] /( literal[int] - identifier[lrange] )
identifier[pi] = literal[int]
keyword[if] identifier[pilr] [- literal[int] ]< literal[int] :
identifier[pi] = identifier[pilr] [- literal[int] ]
identifier[QV_] = identifier[pi] * identifier[m] / identifier[lPV] * identifier[PV]
identifier[QV_] [- literal[int] ]= identifier[min] ( identifier[QV_] [- literal[int] ], literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[lPV] - literal[int] ,- literal[int] ,- literal[int] ):
identifier[QV_] [ identifier[i] ]= identifier[min] ( identifier[pi] * identifier[m] * identifier[PV] [ identifier[i] ]/( identifier[i] + literal[int] ), identifier[QV_] [ identifier[i] + literal[int] ])
identifier[QV] = identifier[sp] . identifier[zeros_like] ( identifier[PV] )
identifier[QV] [ identifier[IPV] ]= identifier[QV_]
identifier[QV] = identifier[QV] . identifier[reshape] ( identifier[S] )
keyword[return] identifier[QV] | def qvalues1(PV, m=None, pi=1.0):
"""estimate q vlaues from a list of Pvalues
this algorihm is taken from Storey, significance testing for genomic ...
m: number of tests, (if not len(PV)), pi: fraction of expected true null (1.0 is a conservative estimate)
@param PV: pvalues
@param m: total number of tests if PV is not the entire array.
@param pi: fraction of true null
"""
S = PV.shape
PV = PV.flatten()
if m is None:
m = len(PV) * 1.0 # depends on [control=['if'], data=['m']]
else:
m *= 1.0
lPV = len(PV)
#1. sort pvalues
PV = PV.squeeze()
IPV = PV.argsort()
PV = PV[IPV]
#2. estimate lambda
if pi is None:
lrange = sp.linspace(0.05, 0.95, int(max(lPV / 100.0, 10)))  # linspace needs an integer count
pil = sp.double((PV[:, sp.newaxis] > lrange).sum(axis=0)) / lPV
pilr = pil / (1.0 - lrange)
#ok, I think for SNPs this is pretty useless, pi is close to 1!
pi = 1.0
#if there is something useful in there, use the estimate closest to 1
if pilr[-1] < 1.0:
pi = pilr[-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pi']]
#3. initialise q values
QV_ = pi * m / lPV * PV
QV_[-1] = min(QV_[-1], 1.0)
#4. update estimate
for i in range(lPV - 2, -1, -1):
QV_[i] = min(pi * m * PV[i] / (i + 1.0), QV_[i + 1]) # depends on [control=['for'], data=['i']]
#5. invert sorting
QV = sp.zeros_like(PV)
QV[IPV] = QV_
QV = QV.reshape(S)
return QV |
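A smoke test, assuming sp is a NumPy-compatible module (the sp.linspace / sp.zeros_like calls suggest import numpy as sp): with the default pi=1.0, q-values of uniform p-values stay in (0, 1].

import numpy as sp

sp.random.seed(0)
PV = sp.random.uniform(size=1000)
QV = qvalues1(PV)  # pi defaults to 1.0, m defaults to len(PV)
print(QV.shape, bool(QV.max() <= 1.0))  # -> (1000,) True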
def loadExperimentDescriptionScriptFromDir(experimentDir):
""" Loads the experiment description python script from the given experiment
directory.
:param experimentDir: (string) experiment directory path
:returns: module of the loaded experiment description scripts
"""
descriptionScriptPath = os.path.join(experimentDir, "description.py")
module = _loadDescriptionFile(descriptionScriptPath)
return module | def function[loadExperimentDescriptionScriptFromDir, parameter[experimentDir]]:
constant[ Loads the experiment description python script from the given experiment
directory.
:param experimentDir: (string) experiment directory path
:returns: module of the loaded experiment description scripts
]
variable[descriptionScriptPath] assign[=] call[name[os].path.join, parameter[name[experimentDir], constant[description.py]]]
variable[module] assign[=] call[name[_loadDescriptionFile], parameter[name[descriptionScriptPath]]]
return[name[module]] | keyword[def] identifier[loadExperimentDescriptionScriptFromDir] ( identifier[experimentDir] ):
literal[string]
identifier[descriptionScriptPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[experimentDir] , literal[string] )
identifier[module] = identifier[_loadDescriptionFile] ( identifier[descriptionScriptPath] )
keyword[return] identifier[module] | def loadExperimentDescriptionScriptFromDir(experimentDir):
""" Loads the experiment description python script from the given experiment
directory.
:param experimentDir: (string) experiment directory path
:returns: module of the loaded experiment description scripts
"""
descriptionScriptPath = os.path.join(experimentDir, 'description.py')
module = _loadDescriptionFile(descriptionScriptPath)
return module |
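_loadDescriptionFile is referenced but not shown in this record; a plausible stand-in using importlib (purely illustrative, not the actual helper) would be:

import importlib.util

def _loadDescriptionFile(path):
    # Hypothetical: load `path` as a throwaway module object.
    spec = importlib.util.spec_from_file_location('description', path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module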
def _do_cb(self, cb, error_cb, *args, **kw):
"""
Called internally by callback(). Does cb and error_cb selection.
"""
try:
res = self.work(*args, **kw)
except Exception as e:
if error_cb is None:
show_err()
elif error_cb:
error_cb(e)
else:
# Success, let's call away!
cb(res) | def function[_do_cb, parameter[self, cb, error_cb]]:
constant[
Called internally by callback(). Does cb and error_cb selection.
]
<ast.Try object at 0x7da2041da350> | keyword[def] identifier[_do_cb] ( identifier[self] , identifier[cb] , identifier[error_cb] ,* identifier[args] ,** identifier[kw] ):
literal[string]
keyword[try] :
identifier[res] = identifier[self] . identifier[work] (* identifier[args] ,** identifier[kw] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[error_cb] keyword[is] keyword[None] :
identifier[show_err] ()
keyword[elif] identifier[error_cb] :
identifier[error_cb] ( identifier[e] )
keyword[else] :
identifier[cb] ( identifier[res] ) | def _do_cb(self, cb, error_cb, *args, **kw):
"""
Called internally by callback(). Does cb and error_cb selection.
"""
try:
res = self.work(*args, **kw) # depends on [control=['try'], data=[]]
except Exception as e:
if error_cb is None:
show_err() # depends on [control=['if'], data=[]]
elif error_cb:
error_cb(e) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
else:
# Success, let's call away!
cb(res) |
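A minimal harness for the cb/error_cb selection above, with a stub work and a print standing in for show_err (both assumptions, not part of the record):

class Task(object):
    def __init__(self, work):
        self.work = work

    def _do_cb(self, cb, error_cb, *args, **kw):
        try:
            res = self.work(*args, **kw)
        except Exception as e:
            if error_cb is None:
                print('unhandled:', e)  # stands in for show_err()
            elif error_cb:
                error_cb(e)
        else:
            cb(res)

Task(lambda x: x * 2)._do_cb(print, None, 21)                       # -> 42
Task(lambda x: 1 / x)._do_cb(print, lambda e: print('err:', e), 0)  # -> err: ...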
def _set_cfunctions(self):
# type: () -> None
""" Set all ctypes functions and attach them to attributes. """
def cfactory(func, argtypes, restype):
# type: (str, List[Any], Any) -> None
""" Factorize ctypes creations. """
self._cfactory(
attr=self.core, func=func, argtypes=argtypes, restype=restype
)
uint32 = ctypes.c_uint32
void = ctypes.c_void_p
size_t = ctypes.c_size_t
pointer = ctypes.POINTER
cfactory(
func="CGGetActiveDisplayList",
argtypes=[uint32, pointer(uint32), pointer(uint32)],
restype=ctypes.c_int32,
)
cfactory(func="CGDisplayBounds", argtypes=[uint32], restype=CGRect)
cfactory(func="CGRectStandardize", argtypes=[CGRect], restype=CGRect)
cfactory(func="CGRectUnion", argtypes=[CGRect, CGRect], restype=CGRect)
cfactory(func="CGDisplayRotation", argtypes=[uint32], restype=ctypes.c_float)
cfactory(
func="CGWindowListCreateImage",
argtypes=[CGRect, uint32, uint32, uint32],
restype=void,
)
cfactory(func="CGImageGetWidth", argtypes=[void], restype=size_t)
cfactory(func="CGImageGetHeight", argtypes=[void], restype=size_t)
cfactory(func="CGImageGetDataProvider", argtypes=[void], restype=void)
cfactory(func="CGDataProviderCopyData", argtypes=[void], restype=void)
cfactory(func="CFDataGetBytePtr", argtypes=[void], restype=void)
cfactory(func="CFDataGetLength", argtypes=[void], restype=ctypes.c_uint64)
cfactory(func="CGImageGetBytesPerRow", argtypes=[void], restype=size_t)
cfactory(func="CGImageGetBitsPerPixel", argtypes=[void], restype=size_t)
cfactory(func="CGDataProviderRelease", argtypes=[void], restype=void)
cfactory(func="CFRelease", argtypes=[void], restype=void) | def function[_set_cfunctions, parameter[self]]:
constant[ Set all ctypes functions and attach them to attributes. ]
def function[cfactory, parameter[func, argtypes, restype]]:
constant[ Factorize ctypes creations. ]
call[name[self]._cfactory, parameter[]]
variable[uint32] assign[=] name[ctypes].c_uint32
variable[void] assign[=] name[ctypes].c_void_p
variable[size_t] assign[=] name[ctypes].c_size_t
variable[pointer] assign[=] name[ctypes].POINTER
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]]
call[name[cfactory], parameter[]] | keyword[def] identifier[_set_cfunctions] ( identifier[self] ):
literal[string]
keyword[def] identifier[cfactory] ( identifier[func] , identifier[argtypes] , identifier[restype] ):
literal[string]
identifier[self] . identifier[_cfactory] (
identifier[attr] = identifier[self] . identifier[core] , identifier[func] = identifier[func] , identifier[argtypes] = identifier[argtypes] , identifier[restype] = identifier[restype]
)
identifier[uint32] = identifier[ctypes] . identifier[c_uint32]
identifier[void] = identifier[ctypes] . identifier[c_void_p]
identifier[size_t] = identifier[ctypes] . identifier[c_size_t]
identifier[pointer] = identifier[ctypes] . identifier[POINTER]
identifier[cfactory] (
identifier[func] = literal[string] ,
identifier[argtypes] =[ identifier[uint32] , identifier[pointer] ( identifier[uint32] ), identifier[pointer] ( identifier[uint32] )],
identifier[restype] = identifier[ctypes] . identifier[c_int32] ,
)
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[uint32] ], identifier[restype] = identifier[CGRect] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[CGRect] ], identifier[restype] = identifier[CGRect] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[CGRect] , identifier[CGRect] ], identifier[restype] = identifier[CGRect] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[uint32] ], identifier[restype] = identifier[ctypes] . identifier[c_float] )
identifier[cfactory] (
identifier[func] = literal[string] ,
identifier[argtypes] =[ identifier[CGRect] , identifier[uint32] , identifier[uint32] , identifier[uint32] ],
identifier[restype] = identifier[void] ,
)
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[size_t] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[size_t] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[void] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[void] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[void] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[ctypes] . identifier[c_uint64] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[size_t] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[size_t] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[void] )
identifier[cfactory] ( identifier[func] = literal[string] , identifier[argtypes] =[ identifier[void] ], identifier[restype] = identifier[void] ) | def _set_cfunctions(self):
# type: () -> None
' Set all ctypes functions and attach them to attributes. '
def cfactory(func, argtypes, restype):
# type: (str, List[Any], Any) -> None
' Factorize ctypes creations. '
self._cfactory(attr=self.core, func=func, argtypes=argtypes, restype=restype)
uint32 = ctypes.c_uint32
void = ctypes.c_void_p
size_t = ctypes.c_size_t
pointer = ctypes.POINTER
cfactory(func='CGGetActiveDisplayList', argtypes=[uint32, pointer(uint32), pointer(uint32)], restype=ctypes.c_int32)
cfactory(func='CGDisplayBounds', argtypes=[uint32], restype=CGRect)
cfactory(func='CGRectStandardize', argtypes=[CGRect], restype=CGRect)
cfactory(func='CGRectUnion', argtypes=[CGRect, CGRect], restype=CGRect)
cfactory(func='CGDisplayRotation', argtypes=[uint32], restype=ctypes.c_float)
cfactory(func='CGWindowListCreateImage', argtypes=[CGRect, uint32, uint32, uint32], restype=void)
cfactory(func='CGImageGetWidth', argtypes=[void], restype=size_t)
cfactory(func='CGImageGetHeight', argtypes=[void], restype=size_t)
cfactory(func='CGImageGetDataProvider', argtypes=[void], restype=void)
cfactory(func='CGDataProviderCopyData', argtypes=[void], restype=void)
cfactory(func='CFDataGetBytePtr', argtypes=[void], restype=void)
cfactory(func='CFDataGetLength', argtypes=[void], restype=ctypes.c_uint64)
cfactory(func='CGImageGetBytesPerRow', argtypes=[void], restype=size_t)
cfactory(func='CGImageGetBitsPerPixel', argtypes=[void], restype=size_t)
cfactory(func='CGDataProviderRelease', argtypes=[void], restype=void)
cfactory(func='CFRelease', argtypes=[void], restype=void) |
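_cfactory itself is not part of this record; the usual shape of such a helper (an assumption, modeled on the calls above) just attaches ctypes metadata so later calls are type-checked:

def _cfactory(self, attr, func, argtypes, restype):
    # Look up attr.<func> and record its argument/return types for ctypes.
    meth = getattr(attr, func)
    meth.argtypes = argtypes
    meth.restype = restype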
def get_primes(n):
"""Return list of all primes less than n,
Using sieve of Eratosthenes.
"""
if n <= 0:
raise ValueError("'n' must be a positive integer.")
# If x is even, exclude x from list (-1):
sieve_size = (n // 2 - 1) if n % 2 == 0 else (n // 2)
sieve = [True for _ in range(sieve_size)] # Sieve
primes = [] # List of Primes
if n >= 2:
primes.append(2) # 2 is prime by default
for i in range(sieve_size):
if sieve[i]:
value_at_i = i*2 + 3
primes.append(value_at_i)
for j in range(i, sieve_size, value_at_i):
sieve[j] = False
return primes | def function[get_primes, parameter[n]]:
constant[Return list of all primes less than n,
Using sieve of Eratosthenes.
]
if compare[name[n] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b209b790>
variable[sieve_size] assign[=] <ast.IfExp object at 0x7da1b2099a80>
variable[sieve] assign[=] <ast.ListComp object at 0x7da1b20998d0>
variable[primes] assign[=] list[[]]
if compare[name[n] greater_or_equal[>=] constant[2]] begin[:]
call[name[primes].append, parameter[constant[2]]]
for taget[name[i]] in starred[call[name[range], parameter[name[sieve_size]]]] begin[:]
if call[name[sieve]][name[i]] begin[:]
variable[value_at_i] assign[=] binary_operation[binary_operation[name[i] * constant[2]] + constant[3]]
call[name[primes].append, parameter[name[value_at_i]]]
for taget[name[j]] in starred[call[name[range], parameter[name[i], name[sieve_size], name[value_at_i]]]] begin[:]
call[name[sieve]][name[j]] assign[=] constant[False]
return[name[primes]] | keyword[def] identifier[get_primes] ( identifier[n] ):
literal[string]
keyword[if] identifier[n] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[sieve_size] =( identifier[n] // literal[int] - literal[int] ) keyword[if] identifier[n] % literal[int] == literal[int] keyword[else] ( identifier[n] // literal[int] )
identifier[sieve] =[ keyword[True] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[sieve_size] )]
identifier[primes] =[]
keyword[if] identifier[n] >= literal[int] :
identifier[primes] . identifier[append] ( literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[sieve_size] ):
keyword[if] identifier[sieve] [ identifier[i] ]:
identifier[value_at_i] = identifier[i] * literal[int] + literal[int]
identifier[primes] . identifier[append] ( identifier[value_at_i] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] , identifier[sieve_size] , identifier[value_at_i] ):
identifier[sieve] [ identifier[j] ]= keyword[False]
keyword[return] identifier[primes] | def get_primes(n):
"""Return list of all primes less than n,
Using sieve of Eratosthenes.
"""
if n <= 0:
raise ValueError("'n' must be a positive integer.") # depends on [control=['if'], data=[]]
# If x is even, exclude x from list (-1):
sieve_size = n // 2 - 1 if n % 2 == 0 else n // 2
sieve = [True for _ in range(sieve_size)] # Sieve
primes = [] # List of Primes
if n >= 2:
primes.append(2) # 2 is prime by default # depends on [control=['if'], data=[]]
for i in range(sieve_size):
if sieve[i]:
value_at_i = i * 2 + 3
primes.append(value_at_i)
for j in range(i, sieve_size, value_at_i):
sieve[j] = False # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return primes |
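Quick checks of the sieve:

print(get_primes(30))  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(get_primes(2))   # -> [2]
# get_primes(0) raises ValueError: 'n' must be a positive integer.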
def get_parallel_bucket(buckets: List[Tuple[int, int]],
length_source: int,
length_target: int) -> Tuple[Optional[int], Optional[Tuple[int, int]]]:
"""
Returns bucket index and bucket from a list of buckets, given source and target length.
Algorithm assumes buckets are sorted from shortest to longest.
Returns (None, None) if no bucket fits.
:param buckets: List of buckets, in sorted order, shortest to longest.
:param length_source: Length of source sequence.
:param length_target: Length of target sequence.
:return: Tuple of (bucket index, bucket), or (None, None) if not fitting.
"""
for j, (source_bkt, target_bkt) in enumerate(buckets):
if source_bkt >= length_source and target_bkt >= length_target:
return j, (source_bkt, target_bkt)
return None, None | def function[get_parallel_bucket, parameter[buckets, length_source, length_target]]:
constant[
Returns bucket index and bucket from a list of buckets, given source and target length.
Algorithm assumes buckets are sorted from shortest to longest.
Returns (None, None) if no bucket fits.
:param buckets: List of buckets, in sorted order, shortest to longest.
:param length_source: Length of source sequence.
:param length_target: Length of target sequence.
:return: Tuple of (bucket index, bucket), or (None, None) if not fitting.
]
for taget[tuple[[<ast.Name object at 0x7da1b1d0d4b0>, <ast.Tuple object at 0x7da1b1d0f6a0>]]] in starred[call[name[enumerate], parameter[name[buckets]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1d0ec50> begin[:]
return[tuple[[<ast.Name object at 0x7da1b1d0c790>, <ast.Tuple object at 0x7da1b1d0dfc0>]]]
return[tuple[[<ast.Constant object at 0x7da1b1d0d390>, <ast.Constant object at 0x7da1b1d0c670>]]] | keyword[def] identifier[get_parallel_bucket] ( identifier[buckets] : identifier[List] [ identifier[Tuple] [ identifier[int] , identifier[int] ]],
identifier[length_source] : identifier[int] ,
identifier[length_target] : identifier[int] )-> identifier[Tuple] [ identifier[Optional] [ identifier[int] ], identifier[Optional] [ identifier[Tuple] [ identifier[int] , identifier[int] ]]]:
literal[string]
keyword[for] identifier[j] ,( identifier[source_bkt] , identifier[target_bkt] ) keyword[in] identifier[enumerate] ( identifier[buckets] ):
keyword[if] identifier[source_bkt] >= identifier[length_source] keyword[and] identifier[target_bkt] >= identifier[length_target] :
keyword[return] identifier[j] ,( identifier[source_bkt] , identifier[target_bkt] )
keyword[return] keyword[None] , keyword[None] | def get_parallel_bucket(buckets: List[Tuple[int, int]], length_source: int, length_target: int) -> Tuple[Optional[int], Optional[Tuple[int, int]]]:
"""
Returns bucket index and bucket from a list of buckets, given source and target length.
Algorithm assumes buckets are sorted from shortest to longest.
Returns (None, None) if no bucket fits.
:param buckets: List of buckets, in sorted order, shortest to longest.
:param length_source: Length of source sequence.
:param length_target: Length of target sequence.
:return: Tuple of (bucket index, bucket), or (None, None) if not fitting.
"""
for (j, (source_bkt, target_bkt)) in enumerate(buckets):
if source_bkt >= length_source and target_bkt >= length_target:
return (j, (source_bkt, target_bkt)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return (None, None) |
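For example, with buckets sorted shortest to longest (the annotations assume from typing import List, Optional, Tuple is in scope):

buckets = [(10, 10), (20, 20), (40, 40)]
print(get_parallel_bucket(buckets, 12, 18))  # -> (1, (20, 20))
print(get_parallel_bucket(buckets, 50, 5))   # -> (None, None), source too long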
def _keys(expr):
"""
Retrieve keys of a dict
:param expr: dict sequence / scalar
:return: a list sequence / scalar holding the dict keys
"""
if isinstance(expr, SequenceExpr):
dtype = expr.data_type
else:
dtype = expr.value_type
return composite_op(expr, DictKeys, df_types.List(dtype.key_type)) | def function[_keys, parameter[expr]]:
constant[
Retrieve keys of a dict
:param expr: dict sequence / scalar
:return: a list sequence / scalar holding the dict keys
]
if call[name[isinstance], parameter[name[expr], name[SequenceExpr]]] begin[:]
variable[dtype] assign[=] name[expr].data_type
return[call[name[composite_op], parameter[name[expr], name[DictKeys], call[name[df_types].List, parameter[name[dtype].key_type]]]]] | keyword[def] identifier[_keys] ( identifier[expr] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[expr] , identifier[SequenceExpr] ):
identifier[dtype] = identifier[expr] . identifier[data_type]
keyword[else] :
identifier[dtype] = identifier[expr] . identifier[value_type]
keyword[return] identifier[composite_op] ( identifier[expr] , identifier[DictKeys] , identifier[df_types] . identifier[List] ( identifier[dtype] . identifier[key_type] )) | def _keys(expr):
"""
Retrieve keys of a dict
:param expr: dict sequence / scalar
:return:
"""
if isinstance(expr, SequenceExpr):
dtype = expr.data_type # depends on [control=['if'], data=[]]
else:
dtype = expr.value_type
return composite_op(expr, DictKeys, df_types.List(dtype.key_type)) |
def on_props_activated(self, menu_item):
'''显示选中的文件或者当前目录的属性'''
tree_paths = self.iconview.get_selected_items()
if not tree_paths:
dialog = FolderPropertyDialog(self, self.app, self.parent.path)
dialog.run()
dialog.destroy()
else:
for tree_path in tree_paths:
pcs_file = self.get_pcs_file(tree_path)
dialog = PropertiesDialog(self.parent, self.app, pcs_file)
dialog.run()
dialog.destroy() | def function[on_props_activated, parameter[self, menu_item]]:
constant[Show the properties of the selected files, or of the current directory]
variable[tree_paths] assign[=] call[name[self].iconview.get_selected_items, parameter[]]
if <ast.UnaryOp object at 0x7da20c7c9e10> begin[:]
variable[dialog] assign[=] call[name[FolderPropertyDialog], parameter[name[self], name[self].app, name[self].parent.path]]
call[name[dialog].run, parameter[]]
call[name[dialog].destroy, parameter[]] | keyword[def] identifier[on_props_activated] ( identifier[self] , identifier[menu_item] ):
literal[string]
identifier[tree_paths] = identifier[self] . identifier[iconview] . identifier[get_selected_items] ()
keyword[if] keyword[not] identifier[tree_paths] :
identifier[dialog] = identifier[FolderPropertyDialog] ( identifier[self] , identifier[self] . identifier[app] , identifier[self] . identifier[parent] . identifier[path] )
identifier[dialog] . identifier[run] ()
identifier[dialog] . identifier[destroy] ()
keyword[else] :
keyword[for] identifier[tree_path] keyword[in] identifier[tree_paths] :
identifier[pcs_file] = identifier[self] . identifier[get_pcs_file] ( identifier[tree_path] )
identifier[dialog] = identifier[PropertiesDialog] ( identifier[self] . identifier[parent] , identifier[self] . identifier[app] , identifier[pcs_file] )
identifier[dialog] . identifier[run] ()
identifier[dialog] . identifier[destroy] () | def on_props_activated(self, menu_item):
"""显示选中的文件或者当前目录的属性"""
tree_paths = self.iconview.get_selected_items()
if not tree_paths:
dialog = FolderPropertyDialog(self, self.app, self.parent.path)
dialog.run()
dialog.destroy() # depends on [control=['if'], data=[]]
else:
for tree_path in tree_paths:
pcs_file = self.get_pcs_file(tree_path)
dialog = PropertiesDialog(self.parent, self.app, pcs_file)
dialog.run()
dialog.destroy() # depends on [control=['for'], data=['tree_path']] |
def generate(env):
"""Add Builders and construction variables for jar to an Environment."""
SCons.Tool.CreateJarBuilder(env)
SCons.Tool.CreateJavaFileBuilder(env)
SCons.Tool.CreateJavaClassFileBuilder(env)
SCons.Tool.CreateJavaClassDirBuilder(env)
env.AddMethod(Jar)
env['JAR'] = 'jar'
env['JARFLAGS'] = SCons.Util.CLVar('cf')
env['_JARFLAGS'] = jarFlags
env['_JARMANIFEST'] = jarManifest
env['_JARSOURCES'] = jarSources
env['_JARCOM'] = '$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES'
env['JARCOM'] = "${TEMPFILE('$_JARCOM','$JARCOMSTR')}"
env['JARSUFFIX'] = '.jar' | def function[generate, parameter[env]]:
constant[Add Builders and construction variables for jar to an Environment.]
call[name[SCons].Tool.CreateJarBuilder, parameter[name[env]]]
call[name[SCons].Tool.CreateJavaFileBuilder, parameter[name[env]]]
call[name[SCons].Tool.CreateJavaClassFileBuilder, parameter[name[env]]]
call[name[SCons].Tool.CreateJavaClassDirBuilder, parameter[name[env]]]
call[name[env].AddMethod, parameter[name[Jar]]]
call[name[env]][constant[JAR]] assign[=] constant[jar]
call[name[env]][constant[JARFLAGS]] assign[=] call[name[SCons].Util.CLVar, parameter[constant[cf]]]
call[name[env]][constant[_JARFLAGS]] assign[=] name[jarFlags]
call[name[env]][constant[_JARMANIFEST]] assign[=] name[jarManifest]
call[name[env]][constant[_JARSOURCES]] assign[=] name[jarSources]
call[name[env]][constant[_JARCOM]] assign[=] constant[$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES]
call[name[env]][constant[JARCOM]] assign[=] constant[${TEMPFILE('$_JARCOM','$JARCOMSTR')}]
call[name[env]][constant[JARSUFFIX]] assign[=] constant[.jar] | keyword[def] identifier[generate] ( identifier[env] ):
literal[string]
identifier[SCons] . identifier[Tool] . identifier[CreateJarBuilder] ( identifier[env] )
identifier[SCons] . identifier[Tool] . identifier[CreateJavaFileBuilder] ( identifier[env] )
identifier[SCons] . identifier[Tool] . identifier[CreateJavaClassFileBuilder] ( identifier[env] )
identifier[SCons] . identifier[Tool] . identifier[CreateJavaClassDirBuilder] ( identifier[env] )
identifier[env] . identifier[AddMethod] ( identifier[Jar] )
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= identifier[SCons] . identifier[Util] . identifier[CLVar] ( literal[string] )
identifier[env] [ literal[string] ]= identifier[jarFlags]
identifier[env] [ literal[string] ]= identifier[jarManifest]
identifier[env] [ literal[string] ]= identifier[jarSources]
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string]
identifier[env] [ literal[string] ]= literal[string] | def generate(env):
"""Add Builders and construction variables for jar to an Environment."""
SCons.Tool.CreateJarBuilder(env)
SCons.Tool.CreateJavaFileBuilder(env)
SCons.Tool.CreateJavaClassFileBuilder(env)
SCons.Tool.CreateJavaClassDirBuilder(env)
env.AddMethod(Jar)
env['JAR'] = 'jar'
env['JARFLAGS'] = SCons.Util.CLVar('cf')
env['_JARFLAGS'] = jarFlags
env['_JARMANIFEST'] = jarManifest
env['_JARSOURCES'] = jarSources
env['_JARCOM'] = '$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES'
env['JARCOM'] = "${TEMPFILE('$_JARCOM','$JARCOMSTR')}"
env['JARSUFFIX'] = '.jar' |
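In an SConstruct this tool is normally requested by name rather than by calling generate() directly; a minimal sketch, assuming a working JDK so the Java/Jar builders can run:

env = Environment(tools=['javac', 'jar'])
classes = env.Java(target='build/classes', source='src')
env.Jar(target='app.jar', source=classes)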
def material_find_resource(filename, cdn, use_minified=None, local=True):
"""Resource finding function, also available in templates.
Tries to find a resource, will force SSL depending on
``MATERIAL_CDN_FORCE_SSL`` settings.
:param filename: File to find a URL for.
:param cdn: Name of the CDN to use.
:param use_minified: If set to ``True``/``False``, use/don't use
minified. If ``None``, honors
``MATERIAL_USE_MINIFIED``.
:param local: If ``True``, uses the ``local``-CDN when
``MATERIAL_SERVE_LOCAL`` is enabled. If ``False``, uses
the ``static``-CDN instead.
:return: A URL.
"""
config = current_app.config
if config['MATERIAL_SERVE_LOCAL']:
if 'css/' not in filename and 'js/' not in filename:
filename = 'js/' + filename
if use_minified is None:
use_minified = config['MATERIAL_USE_MINIFIED']
if use_minified:
filename = '%s.min.%s' % tuple(filename.rsplit('.', 1))
cdns = current_app.extensions['material']['cdns']
resource_url = cdns[cdn].get_resource_url(filename)
if resource_url.startswith('//') and config['MATERIAL_CDN_FORCE_SSL']:
resource_url = 'https:%s' % resource_url
return resource_url | def function[material_find_resource, parameter[filename, cdn, use_minified, local]]:
constant[Resource finding function, also available in templates.
Tries to find a resource, will force SSL depending on
``MATERIAL_CDN_FORCE_SSL`` settings.
:param filename: File to find a URL for.
:param cdn: Name of the CDN to use.
:param use_minified: If set to ``True``/``False``, use/don't use
minified. If ``None``, honors
``MATERIAL_USE_MINIFIED``.
:param local: If ``True``, uses the ``local``-CDN when
``MATERIAL_SERVE_LOCAL`` is enabled. If ``False``, uses
the ``static``-CDN instead.
:return: A URL.
]
variable[config] assign[=] name[current_app].config
if call[name[config]][constant[MATERIAL_SERVE_LOCAL]] begin[:]
if <ast.BoolOp object at 0x7da1b0da0cd0> begin[:]
variable[filename] assign[=] binary_operation[constant[js/] + name[filename]]
if compare[name[use_minified] is constant[None]] begin[:]
variable[use_minified] assign[=] call[name[config]][constant[MATERIAL_USE_MINIFIED]]
if name[use_minified] begin[:]
variable[filename] assign[=] binary_operation[constant[%s.min.%s] <ast.Mod object at 0x7da2590d6920> call[name[tuple], parameter[call[name[filename].rsplit, parameter[constant[.], constant[1]]]]]]
variable[cdns] assign[=] call[call[name[current_app].extensions][constant[material]]][constant[cdns]]
variable[resource_url] assign[=] call[call[name[cdns]][name[cdn]].get_resource_url, parameter[name[filename]]]
if <ast.BoolOp object at 0x7da20e9b3910> begin[:]
variable[resource_url] assign[=] binary_operation[constant[https:%s] <ast.Mod object at 0x7da2590d6920> name[resource_url]]
return[name[resource_url]] | keyword[def] identifier[material_find_resource] ( identifier[filename] , identifier[cdn] , identifier[use_minified] = keyword[None] , identifier[local] = keyword[True] ):
literal[string]
identifier[config] = identifier[current_app] . identifier[config]
keyword[if] identifier[config] [ literal[string] ]:
keyword[if] literal[string] keyword[not] keyword[in] identifier[filename] keyword[and] literal[string] keyword[not] keyword[in] identifier[filename] :
identifier[filename] = literal[string] + identifier[filename]
keyword[if] identifier[use_minified] keyword[is] keyword[None] :
identifier[use_minified] = identifier[config] [ literal[string] ]
keyword[if] identifier[use_minified] :
identifier[filename] = literal[string] % identifier[tuple] ( identifier[filename] . identifier[rsplit] ( literal[string] , literal[int] ))
identifier[cdns] = identifier[current_app] . identifier[extensions] [ literal[string] ][ literal[string] ]
identifier[resource_url] = identifier[cdns] [ identifier[cdn] ]. identifier[get_resource_url] ( identifier[filename] )
keyword[if] identifier[resource_url] . identifier[startswith] ( literal[string] ) keyword[and] identifier[config] [ literal[string] ]:
identifier[resource_url] = literal[string] % identifier[resource_url]
keyword[return] identifier[resource_url] | def material_find_resource(filename, cdn, use_minified=None, local=True):
"""Resource finding function, also available in templates.
Tries to find a resource, will force SSL depending on
``MATERIAL_CDN_FORCE_SSL`` settings.
:param filename: File to find a URL for.
:param cdn: Name of the CDN to use.
:param use_minified: If set to ``True``/``False``, use/don't use
minified. If ``None``, honors
``MATERIAL_USE_MINIFIED``.
:param local: If ``True``, uses the ``local``-CDN when
``MATERIAL_SERVE_LOCAL`` is enabled. If ``False``, uses
the ``static``-CDN instead.
:return: A URL.
"""
config = current_app.config
if config['MATERIAL_SERVE_LOCAL']:
if 'css/' not in filename and 'js/' not in filename:
filename = 'js/' + filename # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if use_minified is None:
use_minified = config['MATERIAL_USE_MINIFIED'] # depends on [control=['if'], data=['use_minified']]
if use_minified:
filename = '%s.min.%s' % tuple(filename.rsplit('.', 1)) # depends on [control=['if'], data=[]]
cdns = current_app.extensions['material']['cdns']
resource_url = cdns[cdn].get_resource_url(filename)
if resource_url.startswith('//') and config['MATERIAL_CDN_FORCE_SSL']:
resource_url = 'https:%s' % resource_url # depends on [control=['if'], data=[]]
return resource_url |
def read_config_file(path):
"""Returns the configuration from the specified file."""
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
except IOError as ex:
if ex.errno != errno.ENOENT:  # only swallow "file not found"
raise
return {} | def function[read_config_file, parameter[path]]:
constant[Returns the configuration from the specified file.]
<ast.Try object at 0x7da20c9913c0>
return[dictionary[[], []]] | keyword[def] identifier[read_config_file] ( identifier[path] ):
literal[string]
keyword[try] :
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
keyword[return] identifier[json] . identifier[load] ( identifier[f] , identifier[object_pairs_hook] = identifier[OrderedDict] )
keyword[except] identifier[IOError] keyword[as] identifier[ex] :
keyword[if] identifier[ex] . identifier[errno] != identifier[errno] . identifier[ENOENT] :
keyword[raise]
keyword[return] {} | def read_config_file(path):
"""Returns the configuration from the specified file."""
try:
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError as ex:
if ex.errno != errno.ENOENT:  # only swallow "file not found"
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']]
return {} |
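Behaviour sketch (relies on the ex.errno fix above): a missing file yields {}, and valid JSON comes back with key order preserved thanks to OrderedDict:

import json, os, tempfile

path = os.path.join(tempfile.mkdtemp(), 'config.json')
print(read_config_file(path))        # -> {} (file does not exist)
with open(path, 'w') as f:
    json.dump({'b': 1, 'a': 2}, f)
print(list(read_config_file(path)))  # -> ['b', 'a']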
def search_assignable_users_for_issues(self,
username,
project=None,
issueKey=None,
expand=None,
startAt=0,
maxResults=50,
):
"""Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: A string to match usernames against
:type username: str
:param project: Filter returned users by permission in this project (expected if a result will be used to
create an issue)
:type project: Optional[str]
:param issueKey: Filter returned users by this issue (expected if a result will be used to edit this issue)
:type issueKey: Optional[str]
:param expand: Extra information to fetch inside each resource
:type expand: Optional[Any]
:param startAt: Index of the first user to return (Default: 0)
:type startAt: int
:param maxResults: maximum number of users to return.
If maxResults evaluates as False, it will try to get all items in batches. (Default: 50)
:rtype: ResultList
"""
params = {
'username': username}
if project is not None:
params['project'] = project
if issueKey is not None:
params['issueKey'] = issueKey
if expand is not None:
params['expand'] = expand
return self._fetch_pages(User, None, 'user/assignable/search', startAt, maxResults, params) | def function[search_assignable_users_for_issues, parameter[self, username, project, issueKey, expand, startAt, maxResults]]:
constant[Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: A string to match usernames against
:type username: str
:param project: Filter returned users by permission in this project (expected if a result will be used to
create an issue)
:type project: Optional[str]
:param issueKey: Filter returned users by this issue (expected if a result will be used to edit this issue)
:type issueKey: Optional[str]
:param expand: Extra information to fetch inside each resource
:type expand: Optional[Any]
:param startAt: Index of the first user to return (Default: 0)
:type startAt: int
:param maxResults: maximum number of users to return.
If maxResults evaluates as False, it will try to get all items in batches. (Default: 50)
:rtype: ResultList
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b21d6800>], [<ast.Name object at 0x7da1b21f3760>]]
if compare[name[project] is_not constant[None]] begin[:]
call[name[params]][constant[project]] assign[=] name[project]
if compare[name[issueKey] is_not constant[None]] begin[:]
call[name[params]][constant[issueKey]] assign[=] name[issueKey]
if compare[name[expand] is_not constant[None]] begin[:]
call[name[params]][constant[expand]] assign[=] name[expand]
return[call[name[self]._fetch_pages, parameter[name[User], constant[None], constant[user/assignable/search], name[startAt], name[maxResults], name[params]]]] | keyword[def] identifier[search_assignable_users_for_issues] ( identifier[self] ,
identifier[username] ,
identifier[project] = keyword[None] ,
identifier[issueKey] = keyword[None] ,
identifier[expand] = keyword[None] ,
identifier[startAt] = literal[int] ,
identifier[maxResults] = literal[int] ,
):
literal[string]
identifier[params] ={
literal[string] : identifier[username] }
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[project]
keyword[if] identifier[issueKey] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[issueKey]
keyword[if] identifier[expand] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[expand]
keyword[return] identifier[self] . identifier[_fetch_pages] ( identifier[User] , keyword[None] , literal[string] , identifier[startAt] , identifier[maxResults] , identifier[params] ) | def search_assignable_users_for_issues(self, username, project=None, issueKey=None, expand=None, startAt=0, maxResults=50):
"""Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: A string to match usernames against
:type username: str
:param project: Filter returned users by permission in this project (expected if a result will be used to
create an issue)
:type project: Optional[str]
:param issueKey: Filter returned users by this issue (expected if a result will be used to edit this issue)
:type issueKey: Optional[str]
:param expand: Extra information to fetch inside each resource
:type expand: Optional[Any]
:param startAt: Index of the first user to return (Default: 0)
:type startAt: int
:param maxResults: maximum number of users to return.
If maxResults evaluates as False, it will try to get all items in batches. (Default: 50)
:rtype: ResultList
"""
params = {'username': username}
if project is not None:
params['project'] = project # depends on [control=['if'], data=['project']]
if issueKey is not None:
params['issueKey'] = issueKey # depends on [control=['if'], data=['issueKey']]
if expand is not None:
params['expand'] = expand # depends on [control=['if'], data=['expand']]
return self._fetch_pages(User, None, 'user/assignable/search', startAt, maxResults, params) |
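Typical call against a jira.JIRA client; the server URL and credentials are placeholders:

from jira import JIRA

jira = JIRA(server='https://jira.example.com', basic_auth=('user', 'api-token'))
users = jira.search_assignable_users_for_issues('fred', project='PROJ')
for user in users:
    print(user.name, user.displayName)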
def _init_topics(self):
"""Set up initial subscription of mysensors topics."""
_LOGGER.info('Setting up initial MQTT topic subscription')
init_topics = [
'{}/+/+/0/+/+'.format(self._in_prefix),
'{}/+/+/3/+/+'.format(self._in_prefix),
]
self._handle_subscription(init_topics)
if not self.persistence:
return
topics = [
'{}/{}/{}/{}/+/+'.format(
self._in_prefix, str(sensor.sensor_id), str(child.id),
msg_type) for sensor in self.sensors.values()
for child in sensor.children.values()
for msg_type in (int(self.const.MessageType.set),
int(self.const.MessageType.req))
]
topics.extend([
'{}/{}/+/{}/+/+'.format(
self._in_prefix, str(sensor.sensor_id),
int(self.const.MessageType.stream))
for sensor in self.sensors.values()])
self._handle_subscription(topics) | def function[_init_topics, parameter[self]]:
constant[Set up initial subscription of mysensors topics.]
call[name[_LOGGER].info, parameter[constant[Setting up initial MQTT topic subscription]]]
variable[init_topics] assign[=] list[[<ast.Call object at 0x7da20cabcbe0>, <ast.Call object at 0x7da20cabc0a0>]]
call[name[self]._handle_subscription, parameter[name[init_topics]]]
if <ast.UnaryOp object at 0x7da20cabe5f0> begin[:]
return[None]
variable[topics] assign[=] <ast.ListComp object at 0x7da20cabe050>
call[name[topics].extend, parameter[<ast.ListComp object at 0x7da20cabcee0>]]
call[name[self]._handle_subscription, parameter[name[topics]]] | keyword[def] identifier[_init_topics] ( identifier[self] ):
literal[string]
identifier[_LOGGER] . identifier[info] ( literal[string] )
identifier[init_topics] =[
literal[string] . identifier[format] ( identifier[self] . identifier[_in_prefix] ),
literal[string] . identifier[format] ( identifier[self] . identifier[_in_prefix] ),
]
identifier[self] . identifier[_handle_subscription] ( identifier[init_topics] )
keyword[if] keyword[not] identifier[self] . identifier[persistence] :
keyword[return]
identifier[topics] =[
literal[string] . identifier[format] (
identifier[self] . identifier[_in_prefix] , identifier[str] ( identifier[sensor] . identifier[sensor_id] ), identifier[str] ( identifier[child] . identifier[id] ),
identifier[msg_type] ) keyword[for] identifier[sensor] keyword[in] identifier[self] . identifier[sensors] . identifier[values] ()
keyword[for] identifier[child] keyword[in] identifier[sensor] . identifier[children] . identifier[values] ()
keyword[for] identifier[msg_type] keyword[in] ( identifier[int] ( identifier[self] . identifier[const] . identifier[MessageType] . identifier[set] ),
identifier[int] ( identifier[self] . identifier[const] . identifier[MessageType] . identifier[req] ))
]
identifier[topics] . identifier[extend] ([
literal[string] . identifier[format] (
identifier[self] . identifier[_in_prefix] , identifier[str] ( identifier[sensor] . identifier[sensor_id] ),
identifier[int] ( identifier[self] . identifier[const] . identifier[MessageType] . identifier[stream] ))
keyword[for] identifier[sensor] keyword[in] identifier[self] . identifier[sensors] . identifier[values] ()])
identifier[self] . identifier[_handle_subscription] ( identifier[topics] ) | def _init_topics(self):
"""Set up initial subscription of mysensors topics."""
_LOGGER.info('Setting up initial MQTT topic subscription')
init_topics = ['{}/+/+/0/+/+'.format(self._in_prefix), '{}/+/+/3/+/+'.format(self._in_prefix)]
self._handle_subscription(init_topics)
if not self.persistence:
return # depends on [control=['if'], data=[]]
topics = ['{}/{}/{}/{}/+/+'.format(self._in_prefix, str(sensor.sensor_id), str(child.id), msg_type) for sensor in self.sensors.values() for child in sensor.children.values() for msg_type in (int(self.const.MessageType.set), int(self.const.MessageType.req))]
topics.extend(['{}/{}/+/{}/+/+'.format(self._in_prefix, str(sensor.sensor_id), int(self.const.MessageType.stream)) for sensor in self.sensors.values()])
self._handle_subscription(topics) |
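What those format strings produce, for an illustrative in-prefix of 'mygateway-in'; in the MySensors serial API the message types 0/1/2/3/4 are presentation/set/req/internal/stream, which is why the initial subscription covers 0 and 3:

in_prefix = 'mygateway-in'
init_topics = [
    '{}/+/+/0/+/+'.format(in_prefix),  # presentation
    '{}/+/+/3/+/+'.format(in_prefix),  # internal
]
set_topic = '{}/{}/{}/{}/+/+'.format(in_prefix, 1, 2, 1)  # node 1, child 2, set
print(init_topics)  # -> ['mygateway-in/+/+/0/+/+', 'mygateway-in/+/+/3/+/+']
print(set_topic)    # -> mygateway-in/1/2/1/+/+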
def scrub(data, units=False):
"""
For input data [w,f,e] or [w,f] returns the list with NaN, negative, and zero flux
(and corresponding wavelengths and errors) removed.
"""
units = [i.unit if hasattr(i, 'unit') else 1 for i in data]
data = [np.asarray(i.value if hasattr(i, 'unit') else i, dtype=np.float32) for i in data if
isinstance(i, np.ndarray)]
data = [i[np.where(~np.isinf(data[1]))] for i in data]
data = [i[np.where(np.logical_and(data[1] > 0, ~np.isnan(data[1])))] for i in data]
data = [i[np.unique(data[0], return_index=True)[1]] for i in data]
return [i[np.lexsort([data[0]])] * Q for i, Q in zip(data, units)] if units else [i[np.lexsort([data[0]])] for i in
data] | def function[scrub, parameter[data, units]]:
constant[
For input data [w,f,e] or [w,f] returns the list with NaN, negative, and zero flux
(and corresponding wavelengths and errors) removed.
]
variable[units] assign[=] <ast.ListComp object at 0x7da1b0ac4c10>
variable[data] assign[=] <ast.ListComp object at 0x7da1b0ac7c10>
variable[data] assign[=] <ast.ListComp object at 0x7da1b0ac7610>
variable[data] assign[=] <ast.ListComp object at 0x7da1b0bf19f0>
variable[data] assign[=] <ast.ListComp object at 0x7da1b0bf3b80>
return[<ast.IfExp object at 0x7da1b0a9ee30>] | keyword[def] identifier[scrub] ( identifier[data] , identifier[units] = keyword[False] ):
literal[string]
identifier[units] =[ identifier[i] . identifier[unit] keyword[if] identifier[hasattr] ( identifier[i] , literal[string] ) keyword[else] literal[int] keyword[for] identifier[i] keyword[in] identifier[data] ]
identifier[data] =[ identifier[np] . identifier[asarray] ( identifier[i] . identifier[value] keyword[if] identifier[hasattr] ( identifier[i] , literal[string] ) keyword[else] identifier[i] , identifier[dtype] = identifier[np] . identifier[float32] ) keyword[for] identifier[i] keyword[in] identifier[data] keyword[if]
identifier[isinstance] ( identifier[i] , identifier[np] . identifier[ndarray] )]
identifier[data] =[ identifier[i] [ identifier[np] . identifier[where] (~ identifier[np] . identifier[isinf] ( identifier[data] [ literal[int] ]))] keyword[for] identifier[i] keyword[in] identifier[data] ]
identifier[data] =[ identifier[i] [ identifier[np] . identifier[where] ( identifier[np] . identifier[logical_and] ( identifier[data] [ literal[int] ]> literal[int] ,~ identifier[np] . identifier[isnan] ( identifier[data] [ literal[int] ])))] keyword[for] identifier[i] keyword[in] identifier[data] ]
identifier[data] =[ identifier[i] [ identifier[np] . identifier[unique] ( identifier[data] [ literal[int] ], identifier[return_index] = keyword[True] )[ literal[int] ]] keyword[for] identifier[i] keyword[in] identifier[data] ]
keyword[return] [ identifier[i] [ identifier[np] . identifier[lexsort] ([ identifier[data] [ literal[int] ]])]* identifier[Q] keyword[for] identifier[i] , identifier[Q] keyword[in] identifier[zip] ( identifier[data] , identifier[units] )] keyword[if] identifier[units] keyword[else] [ identifier[i] [ identifier[np] . identifier[lexsort] ([ identifier[data] [ literal[int] ]])] keyword[for] identifier[i] keyword[in]
identifier[data] ] | def scrub(data, units=False):
"""
For input data [w,f,e] or [w,f] returns the list with NaN, negative, and zero flux
(and corresponding wavelengths and errors) removed.
"""
units = [i.unit if hasattr(i, 'unit') else 1 for i in data]
data = [np.asarray(i.value if hasattr(i, 'unit') else i, dtype=np.float32) for i in data if isinstance(i, np.ndarray)]
data = [i[np.where(~np.isinf(data[1]))] for i in data]
data = [i[np.where(np.logical_and(data[1] > 0, ~np.isnan(data[1])))] for i in data]
data = [i[np.unique(data[0], return_index=True)[1]] for i in data]
return [i[np.lexsort([data[0]])] * Q for (i, Q) in zip(data, units)] if units else [i[np.lexsort([data[0]])] for i in data] |
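# Minimal usage sketch for scrub, assuming numpy is imported as np as the
# function requires. Note the units argument is shadowed on the first line of
# the body, so plain ndarrays always take the truthy units branch with Q == 1.
import numpy as np
w = np.array([3.0, 1.0, 2.0, 2.0, 4.0])      # unsorted, one duplicate
f = np.array([0.1, -0.5, 0.3, 0.3, np.nan])  # one negative, one NaN
w_clean, f_clean = scrub([w, f])
# Rows with negative or NaN flux are dropped, the duplicate wavelength removed,
# and the result sorted: w_clean ~ [2., 3.], f_clean ~ [0.3, 0.1] (float32)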
def inspectorDataRange(inspector, percentage):
""" Calculates the range from the inspectors' sliced array. Discards percentage of the minimum
and percentage of the maximum values of the inspector.slicedArray
Meant to be used with functools.partial for filling the autorange methods combobox.
The first parameter is an inspector, it's not an array, because we would then have to
regenerate the range function every time sliced array of an inspector changes.
"""
logger.debug("Discarding {}% from id: {}".format(percentage, id(inspector.slicedArray)))
return maskedNanPercentile(inspector.slicedArray, (percentage, 100-percentage) ) | def function[inspectorDataRange, parameter[inspector, percentage]]:
    constant[ Calculates the range from the inspector's sliced array. Discards percentage of the minimum
    and percentage of the maximum values of the inspector.slicedArray.
    Meant to be used with functools.partial for filling the autorange methods combobox.
    The first parameter is an inspector rather than an array, because otherwise we would have
    to regenerate the range function every time the sliced array of an inspector changes.
    ]
call[name[logger].debug, parameter[call[constant[Discarding {}% from id: {}].format, parameter[name[percentage], call[name[id], parameter[name[inspector].slicedArray]]]]]]
return[call[name[maskedNanPercentile], parameter[name[inspector].slicedArray, tuple[[<ast.Name object at 0x7da1b0414670>, <ast.BinOp object at 0x7da1b04172b0>]]]]] | keyword[def] identifier[inspectorDataRange] ( identifier[inspector] , identifier[percentage] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[percentage] , identifier[id] ( identifier[inspector] . identifier[slicedArray] )))
keyword[return] identifier[maskedNanPercentile] ( identifier[inspector] . identifier[slicedArray] ,( identifier[percentage] , literal[int] - identifier[percentage] )) | def inspectorDataRange(inspector, percentage):
""" Calculates the range from the inspectors' sliced array. Discards percentage of the minimum
and percentage of the maximum values of the inspector.slicedArray
Meant to be used with functools.partial for filling the autorange methods combobox.
The first parameter is an inspector, it's not an array, because we would then have to
regenerate the range function every time sliced array of an inspector changes.
"""
logger.debug('Discarding {}% from id: {}'.format(percentage, id(inspector.slicedArray)))
return maskedNanPercentile(inspector.slicedArray, (percentage, 100 - percentage)) |
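# In other words, percentage=1 asks maskedNanPercentile for the 1st and 99th
# percentiles of the sliced array (discarding 1% at each tail), while
# percentage=0 recovers the full masked min/max range.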
def get_tunnel_statistics_output_tunnel_stat_rx_bytes(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_statistics = ET.Element("get_tunnel_statistics")
config = get_tunnel_statistics
output = ET.SubElement(get_tunnel_statistics, "output")
tunnel_stat = ET.SubElement(output, "tunnel-stat")
rx_bytes = ET.SubElement(tunnel_stat, "rx-bytes")
rx_bytes.text = kwargs.pop('rx_bytes')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_tunnel_statistics_output_tunnel_stat_rx_bytes, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_tunnel_statistics] assign[=] call[name[ET].Element, parameter[constant[get_tunnel_statistics]]]
variable[config] assign[=] name[get_tunnel_statistics]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_tunnel_statistics], constant[output]]]
variable[tunnel_stat] assign[=] call[name[ET].SubElement, parameter[name[output], constant[tunnel-stat]]]
variable[rx_bytes] assign[=] call[name[ET].SubElement, parameter[name[tunnel_stat], constant[rx-bytes]]]
name[rx_bytes].text assign[=] call[name[kwargs].pop, parameter[constant[rx_bytes]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_tunnel_statistics_output_tunnel_stat_rx_bytes] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_tunnel_statistics] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_tunnel_statistics]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_tunnel_statistics] , literal[string] )
identifier[tunnel_stat] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[rx_bytes] = identifier[ET] . identifier[SubElement] ( identifier[tunnel_stat] , literal[string] )
identifier[rx_bytes] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_tunnel_statistics_output_tunnel_stat_rx_bytes(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_tunnel_statistics = ET.Element('get_tunnel_statistics')
config = get_tunnel_statistics
output = ET.SubElement(get_tunnel_statistics, 'output')
tunnel_stat = ET.SubElement(output, 'tunnel-stat')
rx_bytes = ET.SubElement(tunnel_stat, 'rx-bytes')
rx_bytes.text = kwargs.pop('rx_bytes')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
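# Standalone sketch of the element tree the method assembles (the rx-bytes
# value is hypothetical); the initial ET.Element("config") is discarded by
# the immediate reassignment.
import xml.etree.ElementTree as ET
root = ET.Element('get_tunnel_statistics')
stat = ET.SubElement(ET.SubElement(root, 'output'), 'tunnel-stat')
ET.SubElement(stat, 'rx-bytes').text = '1024'
print(ET.tostring(root).decode())
# <get_tunnel_statistics><output><tunnel-stat><rx-bytes>1024</rx-bytes></tunnel-stat></output></get_tunnel_statistics>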
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
buf_length = len(buf)
walker = 0
while True:
if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
break
header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
_, length = struct.unpack_from('>BxxxL', header)
start = walker + STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end] | def function[_multiplexed_buffer_helper, parameter[self, response]]:
constant[A generator of multiplexed data blocks read from a buffered
response.]
variable[buf] assign[=] call[name[self]._result, parameter[name[response]]]
variable[buf_length] assign[=] call[name[len], parameter[name[buf]]]
variable[walker] assign[=] constant[0]
while constant[True] begin[:]
if compare[binary_operation[name[buf_length] - name[walker]] less[<] name[STREAM_HEADER_SIZE_BYTES]] begin[:]
break
variable[header] assign[=] call[name[buf]][<ast.Slice object at 0x7da18c4cf550>]
<ast.Tuple object at 0x7da18c4cd150> assign[=] call[name[struct].unpack_from, parameter[constant[>BxxxL], name[header]]]
variable[start] assign[=] binary_operation[name[walker] + name[STREAM_HEADER_SIZE_BYTES]]
variable[end] assign[=] binary_operation[name[start] + name[length]]
variable[walker] assign[=] name[end]
<ast.Yield object at 0x7da18c4cdff0> | keyword[def] identifier[_multiplexed_buffer_helper] ( identifier[self] , identifier[response] ):
literal[string]
identifier[buf] = identifier[self] . identifier[_result] ( identifier[response] , identifier[binary] = keyword[True] )
identifier[buf_length] = identifier[len] ( identifier[buf] )
identifier[walker] = literal[int]
keyword[while] keyword[True] :
keyword[if] identifier[buf_length] - identifier[walker] < identifier[STREAM_HEADER_SIZE_BYTES] :
keyword[break]
identifier[header] = identifier[buf] [ identifier[walker] : identifier[walker] + identifier[STREAM_HEADER_SIZE_BYTES] ]
identifier[_] , identifier[length] = identifier[struct] . identifier[unpack_from] ( literal[string] , identifier[header] )
identifier[start] = identifier[walker] + identifier[STREAM_HEADER_SIZE_BYTES]
identifier[end] = identifier[start] + identifier[length]
identifier[walker] = identifier[end]
keyword[yield] identifier[buf] [ identifier[start] : identifier[end] ] | def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
buf_length = len(buf)
walker = 0
while True:
if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
break # depends on [control=['if'], data=[]]
header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
(_, length) = struct.unpack_from('>BxxxL', header)
start = walker + STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end] # depends on [control=['while'], data=[]] |
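# The header unpacked above is Docker's 8-byte stream frame: one stream-id
# byte (1 = stdout, 2 = stderr), three pad bytes, then a big-endian uint32
# payload length. A self-contained sketch of that framing, payloads hypothetical:
import struct
STREAM_HEADER_SIZE_BYTES = 8  # struct.calcsize('>BxxxL')
def frame(stream_id, payload):
    return struct.pack('>BxxxL', stream_id, len(payload)) + payload
buf = frame(1, b'hello ') + frame(2, b'world')
# Walking buf the way _multiplexed_buffer_helper does yields b'hello ', then b'world'.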
def greater_than(self, greater_than):
"""Adds new `>` condition
        :param greater_than: int or datetime compatible object (naive UTC datetime or tz-aware datetime)
:raise:
- QueryTypeError: if `greater_than` is of an unexpected type
"""
if hasattr(greater_than, 'strftime'):
greater_than = datetime_as_utc(greater_than).strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(greater_than, six.string_types):
raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(greater_than))
return self._add_condition('>', greater_than, types=[int, str]) | def function[greater_than, parameter[self, greater_than]]:
constant[Adds new `>` condition
    :param greater_than: int or datetime compatible object (naive UTC datetime or tz-aware datetime)
:raise:
- QueryTypeError: if `greater_than` is of an unexpected type
]
if call[name[hasattr], parameter[name[greater_than], constant[strftime]]] begin[:]
variable[greater_than] assign[=] call[call[name[datetime_as_utc], parameter[name[greater_than]]].strftime, parameter[constant[%Y-%m-%d %H:%M:%S]]]
return[call[name[self]._add_condition, parameter[constant[>], name[greater_than]]]] | keyword[def] identifier[greater_than] ( identifier[self] , identifier[greater_than] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[greater_than] , literal[string] ):
identifier[greater_than] = identifier[datetime_as_utc] ( identifier[greater_than] ). identifier[strftime] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[greater_than] , identifier[six] . identifier[string_types] ):
keyword[raise] identifier[QueryTypeError] ( literal[string] % identifier[type] ( identifier[greater_than] ))
keyword[return] identifier[self] . identifier[_add_condition] ( literal[string] , identifier[greater_than] , identifier[types] =[ identifier[int] , identifier[str] ]) | def greater_than(self, greater_than):
"""Adds new `>` condition
        :param greater_than: int or datetime compatible object (naive UTC datetime or tz-aware datetime)
:raise:
- QueryTypeError: if `greater_than` is of an unexpected type
"""
if hasattr(greater_than, 'strftime'):
greater_than = datetime_as_utc(greater_than).strftime('%Y-%m-%d %H:%M:%S') # depends on [control=['if'], data=[]]
elif isinstance(greater_than, six.string_types):
raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(greater_than)) # depends on [control=['if'], data=[]]
return self._add_condition('>', greater_than, types=[int, str]) |
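# Usage sketch, assuming pysnow's chainable QueryBuilder API. Plain strings are
# rejected by the elif branch above, so pass an int or a datetime-compatible
# object.
import pysnow
from datetime import datetime
qb = pysnow.QueryBuilder()
qb.field('sys_created_on').greater_than(datetime(2019, 1, 1))
# the naive datetime is treated as UTC: 'sys_created_on>2019-01-01 00:00:00'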
def aboveAt(self, offset=0):
""" Returns point in the center of the region's top side (offset to the top
by negative ``offset``) """
return Location(self.getX() + (self.getW() / 2), self.getY() + offset) | def function[aboveAt, parameter[self, offset]]:
constant[ Returns point in the center of the region's top side (offset to the top
by negative ``offset``) ]
return[call[name[Location], parameter[binary_operation[call[name[self].getX, parameter[]] + binary_operation[call[name[self].getW, parameter[]] / constant[2]]], binary_operation[call[name[self].getY, parameter[]] + name[offset]]]]] | keyword[def] identifier[aboveAt] ( identifier[self] , identifier[offset] = literal[int] ):
literal[string]
keyword[return] identifier[Location] ( identifier[self] . identifier[getX] ()+( identifier[self] . identifier[getW] ()/ literal[int] ), identifier[self] . identifier[getY] ()+ identifier[offset] ) | def aboveAt(self, offset=0):
""" Returns point in the center of the region's top side (offset to the top
by negative ``offset``) """
return Location(self.getX() + self.getW() / 2, self.getY() + offset) |
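# Worked example: for a region with getX() == 100, getY() == 200 and
# getW() == 50, aboveAt(-10) returns Location(100 + 50 / 2, 200 - 10),
# i.e. the point (125, 190) ten pixels above the midpoint of the top edge.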
async def handle_command(bot: NoneBot, ctx: Context_T) -> bool:
"""
Handle a message as a command.
This function is typically called by "handle_message".
:param bot: NoneBot instance
:param ctx: message context
:return: the message is handled as a command
"""
cmd, current_arg = parse_command(bot, str(ctx['message']).lstrip())
is_privileged_cmd = cmd and cmd.privileged
if is_privileged_cmd and cmd.only_to_me and not ctx['to_me']:
is_privileged_cmd = False
disable_interaction = is_privileged_cmd
if is_privileged_cmd:
logger.debug(f'Command {cmd.name} is a privileged command')
ctx_id = context_id(ctx)
if not is_privileged_cmd:
# wait for 1.5 seconds (at most) if the current session is running
retry = 5
while retry > 0 and \
_sessions.get(ctx_id) and _sessions[ctx_id].running:
retry -= 1
await asyncio.sleep(0.3)
check_perm = True
session = _sessions.get(ctx_id) if not is_privileged_cmd else None
if session:
if session.running:
logger.warning(f'There is a session of command '
f'{session.cmd.name} running, notify the user')
asyncio.ensure_future(send(
bot, ctx,
render_expression(bot.config.SESSION_RUNNING_EXPRESSION)
))
# pretend we are successful, so that NLP won't handle it
return True
if session.is_valid:
logger.debug(f'Session of command {session.cmd.name} exists')
# since it's in a session, the user must be talking to me
ctx['to_me'] = True
session.refresh(ctx, current_arg=str(ctx['message']))
# there is no need to check permission for existing session
check_perm = False
else:
# the session is expired, remove it
logger.debug(f'Session of command {session.cmd.name} is expired')
if ctx_id in _sessions:
del _sessions[ctx_id]
session = None
if not session:
if not cmd:
logger.debug('Not a known command, ignored')
return False
if cmd.only_to_me and not ctx['to_me']:
logger.debug('Not to me, ignored')
return False
session = CommandSession(bot, ctx, cmd, current_arg=current_arg)
logger.debug(f'New session of command {session.cmd.name} created')
return await _real_run_command(session, ctx_id, check_perm=check_perm,
disable_interaction=disable_interaction) | <ast.AsyncFunctionDef object at 0x7da18f811360> | keyword[async] keyword[def] identifier[handle_command] ( identifier[bot] : identifier[NoneBot] , identifier[ctx] : identifier[Context_T] )-> identifier[bool] :
literal[string]
identifier[cmd] , identifier[current_arg] = identifier[parse_command] ( identifier[bot] , identifier[str] ( identifier[ctx] [ literal[string] ]). identifier[lstrip] ())
identifier[is_privileged_cmd] = identifier[cmd] keyword[and] identifier[cmd] . identifier[privileged]
keyword[if] identifier[is_privileged_cmd] keyword[and] identifier[cmd] . identifier[only_to_me] keyword[and] keyword[not] identifier[ctx] [ literal[string] ]:
identifier[is_privileged_cmd] = keyword[False]
identifier[disable_interaction] = identifier[is_privileged_cmd]
keyword[if] identifier[is_privileged_cmd] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[ctx_id] = identifier[context_id] ( identifier[ctx] )
keyword[if] keyword[not] identifier[is_privileged_cmd] :
identifier[retry] = literal[int]
keyword[while] identifier[retry] > literal[int] keyword[and] identifier[_sessions] . identifier[get] ( identifier[ctx_id] ) keyword[and] identifier[_sessions] [ identifier[ctx_id] ]. identifier[running] :
identifier[retry] -= literal[int]
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
identifier[check_perm] = keyword[True]
identifier[session] = identifier[_sessions] . identifier[get] ( identifier[ctx_id] ) keyword[if] keyword[not] identifier[is_privileged_cmd] keyword[else] keyword[None]
keyword[if] identifier[session] :
keyword[if] identifier[session] . identifier[running] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string] )
identifier[asyncio] . identifier[ensure_future] ( identifier[send] (
identifier[bot] , identifier[ctx] ,
identifier[render_expression] ( identifier[bot] . identifier[config] . identifier[SESSION_RUNNING_EXPRESSION] )
))
keyword[return] keyword[True]
keyword[if] identifier[session] . identifier[is_valid] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[ctx] [ literal[string] ]= keyword[True]
identifier[session] . identifier[refresh] ( identifier[ctx] , identifier[current_arg] = identifier[str] ( identifier[ctx] [ literal[string] ]))
identifier[check_perm] = keyword[False]
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[ctx_id] keyword[in] identifier[_sessions] :
keyword[del] identifier[_sessions] [ identifier[ctx_id] ]
identifier[session] = keyword[None]
keyword[if] keyword[not] identifier[session] :
keyword[if] keyword[not] identifier[cmd] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[cmd] . identifier[only_to_me] keyword[and] keyword[not] identifier[ctx] [ literal[string] ]:
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] keyword[False]
identifier[session] = identifier[CommandSession] ( identifier[bot] , identifier[ctx] , identifier[cmd] , identifier[current_arg] = identifier[current_arg] )
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] keyword[await] identifier[_real_run_command] ( identifier[session] , identifier[ctx_id] , identifier[check_perm] = identifier[check_perm] ,
identifier[disable_interaction] = identifier[disable_interaction] ) | async def handle_command(bot: NoneBot, ctx: Context_T) -> bool:
"""
Handle a message as a command.
This function is typically called by "handle_message".
:param bot: NoneBot instance
:param ctx: message context
:return: the message is handled as a command
"""
(cmd, current_arg) = parse_command(bot, str(ctx['message']).lstrip())
is_privileged_cmd = cmd and cmd.privileged
if is_privileged_cmd and cmd.only_to_me and (not ctx['to_me']):
is_privileged_cmd = False # depends on [control=['if'], data=[]]
disable_interaction = is_privileged_cmd
if is_privileged_cmd:
logger.debug(f'Command {cmd.name} is a privileged command') # depends on [control=['if'], data=[]]
ctx_id = context_id(ctx)
if not is_privileged_cmd:
# wait for 1.5 seconds (at most) if the current session is running
retry = 5
while retry > 0 and _sessions.get(ctx_id) and _sessions[ctx_id].running:
retry -= 1
await asyncio.sleep(0.3) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
check_perm = True
session = _sessions.get(ctx_id) if not is_privileged_cmd else None
if session:
if session.running:
logger.warning(f'There is a session of command {session.cmd.name} running, notify the user')
asyncio.ensure_future(send(bot, ctx, render_expression(bot.config.SESSION_RUNNING_EXPRESSION)))
# pretend we are successful, so that NLP won't handle it
return True # depends on [control=['if'], data=[]]
if session.is_valid:
logger.debug(f'Session of command {session.cmd.name} exists')
# since it's in a session, the user must be talking to me
ctx['to_me'] = True
session.refresh(ctx, current_arg=str(ctx['message']))
# there is no need to check permission for existing session
check_perm = False # depends on [control=['if'], data=[]]
else:
# the session is expired, remove it
logger.debug(f'Session of command {session.cmd.name} is expired')
if ctx_id in _sessions:
del _sessions[ctx_id] # depends on [control=['if'], data=['ctx_id', '_sessions']]
session = None # depends on [control=['if'], data=[]]
if not session:
if not cmd:
logger.debug('Not a known command, ignored')
return False # depends on [control=['if'], data=[]]
if cmd.only_to_me and (not ctx['to_me']):
logger.debug('Not to me, ignored')
return False # depends on [control=['if'], data=[]]
session = CommandSession(bot, ctx, cmd, current_arg=current_arg)
logger.debug(f'New session of command {session.cmd.name} created') # depends on [control=['if'], data=[]]
return await _real_run_command(session, ctx_id, check_perm=check_perm, disable_interaction=disable_interaction) |
def handle(self, data):
"""
puts the data in the target.
:param data: the data to post.
:return:
"""
self.dataResponseCode.append(self._doPut(self.sendURL + '/data', data=data)) | def function[handle, parameter[self, data]]:
constant[
puts the data in the target.
:param data: the data to post.
:return:
]
call[name[self].dataResponseCode.append, parameter[call[name[self]._doPut, parameter[binary_operation[name[self].sendURL + constant[/data]]]]]] | keyword[def] identifier[handle] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[dataResponseCode] . identifier[append] ( identifier[self] . identifier[_doPut] ( identifier[self] . identifier[sendURL] + literal[string] , identifier[data] = identifier[data] )) | def handle(self, data):
"""
puts the data in the target.
:param data: the data to post.
:return:
"""
self.dataResponseCode.append(self._doPut(self.sendURL + '/data', data=data)) |
def dphi_fc(fdata):
"""Apply phi derivative in the Fourier domain."""
nrows = fdata.shape[0]
ncols = fdata.shape[1]
B = int(ncols / 2) # As always, we assume nrows and ncols are even
a = list(range(0, int(B)))
ap = list(range(-int(B), 0))
a.extend(ap)
dphi = np.zeros([nrows, ncols], np.complex128)
for k in xrange(0, nrows):
dphi[k, :] = a
fdata[:, :] = 1j * dphi * fdata | def function[dphi_fc, parameter[fdata]]:
constant[Apply phi derivative in the Fourier domain.]
variable[nrows] assign[=] call[name[fdata].shape][constant[0]]
variable[ncols] assign[=] call[name[fdata].shape][constant[1]]
variable[B] assign[=] call[name[int], parameter[binary_operation[name[ncols] / constant[2]]]]
variable[a] assign[=] call[name[list], parameter[call[name[range], parameter[constant[0], call[name[int], parameter[name[B]]]]]]]
variable[ap] assign[=] call[name[list], parameter[call[name[range], parameter[<ast.UnaryOp object at 0x7da18ede4940>, constant[0]]]]]
call[name[a].extend, parameter[name[ap]]]
variable[dphi] assign[=] call[name[np].zeros, parameter[list[[<ast.Name object at 0x7da207f03b50>, <ast.Name object at 0x7da207f01f60>]], name[np].complex128]]
for taget[name[k]] in starred[call[name[xrange], parameter[constant[0], name[nrows]]]] begin[:]
call[name[dphi]][tuple[[<ast.Name object at 0x7da18f09ec80>, <ast.Slice object at 0x7da18f09e8c0>]]] assign[=] name[a]
call[name[fdata]][tuple[[<ast.Slice object at 0x7da18f09ca60>, <ast.Slice object at 0x7da18f09d2d0>]]] assign[=] binary_operation[binary_operation[constant[1j] * name[dphi]] * name[fdata]] | keyword[def] identifier[dphi_fc] ( identifier[fdata] ):
literal[string]
identifier[nrows] = identifier[fdata] . identifier[shape] [ literal[int] ]
identifier[ncols] = identifier[fdata] . identifier[shape] [ literal[int] ]
identifier[B] = identifier[int] ( identifier[ncols] / literal[int] )
identifier[a] = identifier[list] ( identifier[range] ( literal[int] , identifier[int] ( identifier[B] )))
identifier[ap] = identifier[list] ( identifier[range] (- identifier[int] ( identifier[B] ), literal[int] ))
identifier[a] . identifier[extend] ( identifier[ap] )
identifier[dphi] = identifier[np] . identifier[zeros] ([ identifier[nrows] , identifier[ncols] ], identifier[np] . identifier[complex128] )
keyword[for] identifier[k] keyword[in] identifier[xrange] ( literal[int] , identifier[nrows] ):
identifier[dphi] [ identifier[k] ,:]= identifier[a]
identifier[fdata] [:,:]= literal[int] * identifier[dphi] * identifier[fdata] | def dphi_fc(fdata):
"""Apply phi derivative in the Fourier domain."""
nrows = fdata.shape[0]
ncols = fdata.shape[1]
B = int(ncols / 2) # As always, we assume nrows and ncols are even
a = list(range(0, int(B)))
ap = list(range(-int(B), 0))
a.extend(ap)
dphi = np.zeros([nrows, ncols], np.complex128)
for k in xrange(0, nrows):
dphi[k, :] = a # depends on [control=['for'], data=['k']]
fdata[:, :] = 1j * dphi * fdata |
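# Sanity-check sketch (numpy as np assumed; the helper predates Python 3, so
# xrange is aliased to range here): multiplying mode k by 1j*k in Fourier
# space is d/dphi, so differentiating sin(phi) should give cos(phi).
import numpy as np
xrange = range
ncols = 8
phi = 2 * np.pi * np.arange(ncols) / ncols
fdata = np.fft.fft(np.tile(np.sin(phi), (4, 1)), axis=1)
dphi_fc(fdata)  # modifies fdata in place, returns None
deriv = np.fft.ifft(fdata, axis=1).real
# each row of deriv matches np.cos(phi) to machine precision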
def check_subscription(self, request):
"""Redirect to the subscribe page if the user lacks an active subscription."""
subscriber = subscriber_request_callback(request)
if not subscriber_has_active_subscription(subscriber):
if not SUBSCRIPTION_REDIRECT:
raise ImproperlyConfigured("DJSTRIPE_SUBSCRIPTION_REDIRECT is not set.")
return redirect(SUBSCRIPTION_REDIRECT) | def function[check_subscription, parameter[self, request]]:
constant[Redirect to the subscribe page if the user lacks an active subscription.]
variable[subscriber] assign[=] call[name[subscriber_request_callback], parameter[name[request]]]
if <ast.UnaryOp object at 0x7da204622680> begin[:]
if <ast.UnaryOp object at 0x7da2046229b0> begin[:]
<ast.Raise object at 0x7da204620910>
return[call[name[redirect], parameter[name[SUBSCRIPTION_REDIRECT]]]] | keyword[def] identifier[check_subscription] ( identifier[self] , identifier[request] ):
literal[string]
identifier[subscriber] = identifier[subscriber_request_callback] ( identifier[request] )
keyword[if] keyword[not] identifier[subscriber_has_active_subscription] ( identifier[subscriber] ):
keyword[if] keyword[not] identifier[SUBSCRIPTION_REDIRECT] :
keyword[raise] identifier[ImproperlyConfigured] ( literal[string] )
keyword[return] identifier[redirect] ( identifier[SUBSCRIPTION_REDIRECT] ) | def check_subscription(self, request):
"""Redirect to the subscribe page if the user lacks an active subscription."""
subscriber = subscriber_request_callback(request)
if not subscriber_has_active_subscription(subscriber):
if not SUBSCRIPTION_REDIRECT:
raise ImproperlyConfigured('DJSTRIPE_SUBSCRIPTION_REDIRECT is not set.') # depends on [control=['if'], data=[]]
return redirect(SUBSCRIPTION_REDIRECT) # depends on [control=['if'], data=[]] |
def update_discount_coupon_by_id(cls, discount_coupon_id, discount_coupon, **kwargs):
"""Update DiscountCoupon
Update attributes of DiscountCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_discount_coupon_by_id(discount_coupon_id, discount_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str discount_coupon_id: ID of discountCoupon to update. (required)
:param DiscountCoupon discount_coupon: Attributes of discountCoupon to update. (required)
:return: DiscountCoupon
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs)
else:
(data) = cls._update_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs)
return data | def function[update_discount_coupon_by_id, parameter[cls, discount_coupon_id, discount_coupon]]:
constant[Update DiscountCoupon
Update attributes of DiscountCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_discount_coupon_by_id(discount_coupon_id, discount_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str discount_coupon_id: ID of discountCoupon to update. (required)
:param DiscountCoupon discount_coupon: Attributes of discountCoupon to update. (required)
:return: DiscountCoupon
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._update_discount_coupon_by_id_with_http_info, parameter[name[discount_coupon_id], name[discount_coupon]]]] | keyword[def] identifier[update_discount_coupon_by_id] ( identifier[cls] , identifier[discount_coupon_id] , identifier[discount_coupon] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_update_discount_coupon_by_id_with_http_info] ( identifier[discount_coupon_id] , identifier[discount_coupon] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_update_discount_coupon_by_id_with_http_info] ( identifier[discount_coupon_id] , identifier[discount_coupon] ,** identifier[kwargs] )
keyword[return] identifier[data] | def update_discount_coupon_by_id(cls, discount_coupon_id, discount_coupon, **kwargs):
"""Update DiscountCoupon
Update attributes of DiscountCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_discount_coupon_by_id(discount_coupon_id, discount_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str discount_coupon_id: ID of discountCoupon to update. (required)
:param DiscountCoupon discount_coupon: Attributes of discountCoupon to update. (required)
:return: DiscountCoupon
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._update_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs)
return data |
def add_project(self, path):
"""
Adds a project.
:param path: Project path.
:type path: unicode
:return: Method success.
:rtype: bool
"""
if not foundations.common.path_exists(path):
return False
path = os.path.normpath(path)
if self.__model.get_project_nodes(path):
self.__engine.notifications_manager.warnify(
"{0} | '{1}' project is already opened!".format(self.__class__.__name__, path))
return False
LOGGER.info("{0} | Adding '{1}' project!".format(self.__class__.__name__, path))
project_node = self.__model.register_project(path)
if not project_node:
return False
self.__model.set_project_nodes(project_node)
return True | def function[add_project, parameter[self, path]]:
constant[
Adds a project.
:param path: Project path.
:type path: unicode
:return: Method success.
:rtype: bool
]
if <ast.UnaryOp object at 0x7da18bccbfd0> begin[:]
return[constant[False]]
variable[path] assign[=] call[name[os].path.normpath, parameter[name[path]]]
if call[name[self].__model.get_project_nodes, parameter[name[path]]] begin[:]
call[name[self].__engine.notifications_manager.warnify, parameter[call[constant[{0} | '{1}' project is already opened!].format, parameter[name[self].__class__.__name__, name[path]]]]]
return[constant[False]]
call[name[LOGGER].info, parameter[call[constant[{0} | Adding '{1}' project!].format, parameter[name[self].__class__.__name__, name[path]]]]]
variable[project_node] assign[=] call[name[self].__model.register_project, parameter[name[path]]]
if <ast.UnaryOp object at 0x7da18bccb3a0> begin[:]
return[constant[False]]
call[name[self].__model.set_project_nodes, parameter[name[project_node]]]
return[constant[True]] | keyword[def] identifier[add_project] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[foundations] . identifier[common] . identifier[path_exists] ( identifier[path] ):
keyword[return] keyword[False]
identifier[path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[path] )
keyword[if] identifier[self] . identifier[__model] . identifier[get_project_nodes] ( identifier[path] ):
identifier[self] . identifier[__engine] . identifier[notifications_manager] . identifier[warnify] (
literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[path] ))
keyword[return] keyword[False]
identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[path] ))
identifier[project_node] = identifier[self] . identifier[__model] . identifier[register_project] ( identifier[path] )
keyword[if] keyword[not] identifier[project_node] :
keyword[return] keyword[False]
identifier[self] . identifier[__model] . identifier[set_project_nodes] ( identifier[project_node] )
keyword[return] keyword[True] | def add_project(self, path):
"""
Adds a project.
:param path: Project path.
:type path: unicode
:return: Method success.
:rtype: bool
"""
if not foundations.common.path_exists(path):
return False # depends on [control=['if'], data=[]]
path = os.path.normpath(path)
if self.__model.get_project_nodes(path):
self.__engine.notifications_manager.warnify("{0} | '{1}' project is already opened!".format(self.__class__.__name__, path))
return False # depends on [control=['if'], data=[]]
LOGGER.info("{0} | Adding '{1}' project!".format(self.__class__.__name__, path))
project_node = self.__model.register_project(path)
if not project_node:
return False # depends on [control=['if'], data=[]]
self.__model.set_project_nodes(project_node)
return True |
def scan(context, root_dir):
"""Scan a directory for analyses."""
root_dir = root_dir or context.obj['root']
config_files = Path(root_dir).glob('*/analysis/*_config.yaml')
for config_file in config_files:
LOG.debug("found analysis config: %s", config_file)
with config_file.open() as stream:
context.invoke(log_cmd, config=stream, quiet=True)
context.obj['store'].track_update() | def function[scan, parameter[context, root_dir]]:
constant[Scan a directory for analyses.]
variable[root_dir] assign[=] <ast.BoolOp object at 0x7da20c7ca290>
variable[config_files] assign[=] call[call[name[Path], parameter[name[root_dir]]].glob, parameter[constant[*/analysis/*_config.yaml]]]
for taget[name[config_file]] in starred[name[config_files]] begin[:]
call[name[LOG].debug, parameter[constant[found analysis config: %s], name[config_file]]]
with call[name[config_file].open, parameter[]] begin[:]
call[name[context].invoke, parameter[name[log_cmd]]]
call[call[name[context].obj][constant[store]].track_update, parameter[]] | keyword[def] identifier[scan] ( identifier[context] , identifier[root_dir] ):
literal[string]
identifier[root_dir] = identifier[root_dir] keyword[or] identifier[context] . identifier[obj] [ literal[string] ]
identifier[config_files] = identifier[Path] ( identifier[root_dir] ). identifier[glob] ( literal[string] )
keyword[for] identifier[config_file] keyword[in] identifier[config_files] :
identifier[LOG] . identifier[debug] ( literal[string] , identifier[config_file] )
keyword[with] identifier[config_file] . identifier[open] () keyword[as] identifier[stream] :
identifier[context] . identifier[invoke] ( identifier[log_cmd] , identifier[config] = identifier[stream] , identifier[quiet] = keyword[True] )
identifier[context] . identifier[obj] [ literal[string] ]. identifier[track_update] () | def scan(context, root_dir):
"""Scan a directory for analyses."""
root_dir = root_dir or context.obj['root']
config_files = Path(root_dir).glob('*/analysis/*_config.yaml')
for config_file in config_files:
LOG.debug('found analysis config: %s', config_file)
with config_file.open() as stream:
context.invoke(log_cmd, config=stream, quiet=True) # depends on [control=['with'], data=['stream']] # depends on [control=['for'], data=['config_file']]
context.obj['store'].track_update() |
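# For example, with root_dir '/analyses' the pattern '*/analysis/*_config.yaml'
# matches paths like /analyses/case1/analysis/case1_config.yaml: one case
# directory per analysis, each config file handed on to the log command.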
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt) | def function[_smallest_integer_by_dtype, parameter[dt]]:
constant[Helper returning the smallest integer exactly representable by dtype.]
if <ast.UnaryOp object at 0x7da1b02d1cf0> begin[:]
<ast.Raise object at 0x7da1b02d2110>
if call[name[_is_known_unsigned_by_dtype], parameter[name[dt]]] begin[:]
return[constant[0]]
return[binary_operation[<ast.UnaryOp object at 0x7da1b02d3820> * call[name[_largest_integer_by_dtype], parameter[name[dt]]]]] | keyword[def] identifier[_smallest_integer_by_dtype] ( identifier[dt] ):
literal[string]
keyword[if] keyword[not] identifier[_is_known_dtype] ( identifier[dt] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[dt] . identifier[name] ))
keyword[if] identifier[_is_known_unsigned_by_dtype] ( identifier[dt] ):
keyword[return] literal[int]
keyword[return] - literal[int] * identifier[_largest_integer_by_dtype] ( identifier[dt] ) | def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError('Unrecognized dtype: {}'.format(dt.name)) # depends on [control=['if'], data=[]]
if _is_known_unsigned_by_dtype(dt):
return 0 # depends on [control=['if'], data=[]]
return -1 * _largest_integer_by_dtype(dt) |
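# Note the asymmetry: this returns the negation of the largest representable
# integer, not the two's-complement minimum, so for int8 it would yield -127
# (assuming _largest_integer_by_dtype(np.int8) == 127) even though
# np.iinfo(np.int8).min is -128.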