code | code_sememe | token_type | code_dependency
---|---|---|---|
def info(self):
'''Return the header fields as a Message:
Returns:
Message: An instance of :class:`email.message.Message`. If
Python 2, returns an instance of :class:`mimetools.Message`.
'''
if sys.version_info[0] == 2:
return mimetools.Message(io.StringIO(str(self._response.fields)))
else:
return email.message_from_string(str(self._response.fields)) | def function[info, parameter[self]]:
constant[Return the header fields as a Message:
Returns:
Message: An instance of :class:`email.message.Message`. If
Python 2, returns an instance of :class:`mimetools.Message`.
]
if compare[call[name[sys].version_info][constant[0]] equal[==] constant[2]] begin[:]
return[call[name[mimetools].Message, parameter[call[name[io].StringIO, parameter[call[name[str], parameter[name[self]._response.fields]]]]]]] | keyword[def] identifier[info] ( identifier[self] ):
literal[string]
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] :
keyword[return] identifier[mimetools] . identifier[Message] ( identifier[io] . identifier[StringIO] ( identifier[str] ( identifier[self] . identifier[_response] . identifier[fields] )))
keyword[else] :
keyword[return] identifier[email] . identifier[message_from_string] ( identifier[str] ( identifier[self] . identifier[_response] . identifier[fields] )) | def info(self):
"""Return the header fields as a Message:
Returns:
Message: An instance of :class:`email.message.Message`. If
Python 2, returns an instance of :class:`mimetools.Message`.
"""
if sys.version_info[0] == 2:
return mimetools.Message(io.StringIO(str(self._response.fields))) # depends on [control=['if'], data=[]]
else:
return email.message_from_string(str(self._response.fields)) |
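The `info` method above relies on `email.message_from_string`, which parses a raw header block into an `email.message.Message` with dict-like access. A minimal Python 3 sketch, with a hypothetical header string standing in for `str(self._response.fields)`:

```python
import email

# Hypothetical raw header block standing in for str(self._response.fields).
raw_headers = "Content-Type: text/plain\r\nContent-Length: 42\r\n\r\n"
msg = email.message_from_string(raw_headers)
print(msg["Content-Type"])    # -> text/plain
print(msg["Content-Length"])  # -> 42
```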
def downloadDatabases(self, savepath=None, unpack=False):
""" Download databases.
Parameters:
savepath (str): Defaults to current working dir.
unpack (bool): Unpack the zip file.
"""
url = self.url('/diagnostics/databases')
filepath = utils.download(url, self._token, None, savepath, self._session, unpack=unpack)
return filepath | def function[downloadDatabases, parameter[self, savepath, unpack]]:
constant[ Download databases.
Parameters:
savepath (str): Defaults to current working dir.
unpack (bool): Unpack the zip file.
]
variable[url] assign[=] call[name[self].url, parameter[constant[/diagnostics/databases]]]
variable[filepath] assign[=] call[name[utils].download, parameter[name[url], name[self]._token, constant[None], name[savepath], name[self]._session]]
return[name[filepath]] | keyword[def] identifier[downloadDatabases] ( identifier[self] , identifier[savepath] = keyword[None] , identifier[unpack] = keyword[False] ):
literal[string]
identifier[url] = identifier[self] . identifier[url] ( literal[string] )
identifier[filepath] = identifier[utils] . identifier[download] ( identifier[url] , identifier[self] . identifier[_token] , keyword[None] , identifier[savepath] , identifier[self] . identifier[_session] , identifier[unpack] = identifier[unpack] )
keyword[return] identifier[filepath] | def downloadDatabases(self, savepath=None, unpack=False):
""" Download databases.
Parameters:
savepath (str): Defaults to current working dir.
unpack (bool): Unpack the zip file.
"""
url = self.url('/diagnostics/databases')
filepath = utils.download(url, self._token, None, savepath, self._session, unpack=unpack)
return filepath |
def _retrieve(self, uid):
"""Return a dict with the contents of the paste, including the raw
data, if any, as the key 'data'. Must pass in uid, not shortid."""
query = dict(uid=uid)
doc = self.db.pastes.find_one(query)
if 'data_id' in doc:
data_id = doc.pop('data_id')
gfs = gridfs.GridFS(self.db)
doc.update(data=gfs.get(data_id).read())
return doc | def function[_retrieve, parameter[self, uid]]:
constant[Return a dict with the contents of the paste, including the raw
data, if any, as the key 'data'. Must pass in uid, not shortid.]
variable[query] assign[=] call[name[dict], parameter[]]
variable[doc] assign[=] call[name[self].db.pastes.find_one, parameter[name[query]]]
if compare[constant[data_id] in name[doc]] begin[:]
variable[data_id] assign[=] call[name[doc].pop, parameter[constant[data_id]]]
variable[gfs] assign[=] call[name[gridfs].GridFS, parameter[name[self].db]]
call[name[doc].update, parameter[]]
return[name[doc]] | keyword[def] identifier[_retrieve] ( identifier[self] , identifier[uid] ):
literal[string]
identifier[query] = identifier[dict] ( identifier[uid] = identifier[uid] )
identifier[doc] = identifier[self] . identifier[db] . identifier[pastes] . identifier[find_one] ( identifier[query] )
keyword[if] literal[string] keyword[in] identifier[doc] :
identifier[data_id] = identifier[doc] . identifier[pop] ( literal[string] )
identifier[gfs] = identifier[gridfs] . identifier[GridFS] ( identifier[self] . identifier[db] )
identifier[doc] . identifier[update] ( identifier[data] = identifier[gfs] . identifier[get] ( identifier[data_id] ). identifier[read] ())
keyword[return] identifier[doc] | def _retrieve(self, uid):
"""Return a dict with the contents of the paste, including the raw
data, if any, as the key 'data'. Must pass in uid, not shortid."""
query = dict(uid=uid)
doc = self.db.pastes.find_one(query)
if 'data_id' in doc:
data_id = doc.pop('data_id')
gfs = gridfs.GridFS(self.db)
doc.update(data=gfs.get(data_id).read()) # depends on [control=['if'], data=['doc']]
return doc |
def isConnected(self, fromName, toName):
""" Are these two layers connected this way? """
for c in self.connections:
if (c.fromLayer.name == fromName and
c.toLayer.name == toName):
return 1
return 0 | def function[isConnected, parameter[self, fromName, toName]]:
constant[ Are these two layers connected this way? ]
for taget[name[c]] in starred[name[self].connections] begin[:]
if <ast.BoolOp object at 0x7da1b035a530> begin[:]
return[constant[1]]
return[constant[0]] | keyword[def] identifier[isConnected] ( identifier[self] , identifier[fromName] , identifier[toName] ):
literal[string]
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[connections] :
keyword[if] ( identifier[c] . identifier[fromLayer] . identifier[name] == identifier[fromName] keyword[and]
identifier[c] . identifier[toLayer] . identifier[name] == identifier[toName] ):
keyword[return] literal[int]
keyword[return] literal[int] | def isConnected(self, fromName, toName):
""" Are these two layers connected this way? """
for c in self.connections:
if c.fromLayer.name == fromName and c.toLayer.name == toName:
return 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
return 0 |
def end_day_to_datetime(end_day, config):
"""
Convert a given end day to its proper datetime.
This is non-trivial because of the variable ``day_start``. We want to make sure
that even if an 'end day' is specified the actual point in time may reach into the following
day.
Args:
end_day (datetime.date): Raw end date that is to be adjusted.
config: Controller config containing information on when a workday starts.
Returns:
datetime.datetime: The end day as an adjusted datetime object.
Example:
Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to
consider even points in time up to ``2015-04-02 5:29``. That is to represent that a
*work day*
does not match *calendar days*.
Note:
An alternative implementation for the similar problem in legacy hamster:
``hamster.storage.db.Storage.__get_todays_facts``.
"""
day_start_time = config['day_start']
day_end_time = get_day_end(config)
if day_start_time == datetime.time(0, 0, 0):
end = datetime.datetime.combine(end_day, day_end_time)
else:
end = datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1)
return end | def function[end_day_to_datetime, parameter[end_day, config]]:
constant[
Convert a given end day to its proper datetime.
This is non-trivial because of the variable ``day_start``. We want to make sure
that even if an 'end day' is specified the actual point in time may reach into the following
day.
Args:
end_day (datetime.date): Raw end date that is to be adjusted.
config: Controller config containing information on when a workday starts.
Returns:
datetime.datetime: The end day as an adjusted datetime object.
Example:
Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to
consider even points in time up to ``2015-04-02 5:29``. That is to represent that a
*work day*
does not match *calendar days*.
Note:
An alternative implementation for the similar problem in legacy hamster:
``hamster.storage.db.Storage.__get_todays_facts``.
]
variable[day_start_time] assign[=] call[name[config]][constant[day_start]]
variable[day_end_time] assign[=] call[name[get_day_end], parameter[name[config]]]
if compare[name[day_start_time] equal[==] call[name[datetime].time, parameter[constant[0], constant[0], constant[0]]]] begin[:]
variable[end] assign[=] call[name[datetime].datetime.combine, parameter[name[end_day], name[day_end_time]]]
return[name[end]] | keyword[def] identifier[end_day_to_datetime] ( identifier[end_day] , identifier[config] ):
literal[string]
identifier[day_start_time] = identifier[config] [ literal[string] ]
identifier[day_end_time] = identifier[get_day_end] ( identifier[config] )
keyword[if] identifier[day_start_time] == identifier[datetime] . identifier[time] ( literal[int] , literal[int] , literal[int] ):
identifier[end] = identifier[datetime] . identifier[datetime] . identifier[combine] ( identifier[end_day] , identifier[day_end_time] )
keyword[else] :
identifier[end] = identifier[datetime] . identifier[datetime] . identifier[combine] ( identifier[end_day] , identifier[day_end_time] )+ identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
keyword[return] identifier[end] | def end_day_to_datetime(end_day, config):
"""
Convert a given end day to its proper datetime.
This is non-trivial because of the variable ``day_start``. We want to make sure
that even if an 'end day' is specified the actual point in time may reach into the following
day.
Args:
end_day (datetime.date): Raw end date that is to be adjusted.
config: Controller config containing information on when a workday starts.
Returns:
datetime.datetime: The end day as an adjusted datetime object.
Example:
Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to
consider even points in time up to ``2015-04-02 5:29``. That is to represent that a
*work day*
does not match *calendar days*.
Note:
An alternative implementation for the similar problem in legacy hamster:
``hamster.storage.db.Storage.__get_todays_facts``.
"""
day_start_time = config['day_start']
day_end_time = get_day_end(config)
if day_start_time == datetime.time(0, 0, 0):
end = datetime.datetime.combine(end_day, day_end_time) # depends on [control=['if'], data=[]]
else:
end = datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1)
return end |
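A worked example of the adjustment in `end_day_to_datetime`, reproducing the 5:29 case from its docstring. `get_day_end` is external to the snippet above, so this sketch assumes it returns the minute before `day_start`:

```python
import datetime

# Assumption: get_day_end(config) yields the minute before day_start, i.e. 5:29.
config = {'day_start': datetime.time(5, 30)}
day_end_time = datetime.time(5, 29)
end_day = datetime.date(2015, 4, 1)

# day_start is not midnight, so the end reaches into the following calendar day:
end = datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1)
print(end)  # -> 2015-04-02 05:29:00
```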
def p_gate_op_5(self, program):
"""
gate_op : BARRIER id_list ';'
"""
program[0] = node.Barrier([program[2]])
self.verify_bit_list(program[2])
self.verify_distinct([program[2]]) | def function[p_gate_op_5, parameter[self, program]]:
constant[
gate_op : BARRIER id_list ';'
]
call[name[program]][constant[0]] assign[=] call[name[node].Barrier, parameter[list[[<ast.Subscript object at 0x7da1b03a8970>]]]]
call[name[self].verify_bit_list, parameter[call[name[program]][constant[2]]]]
call[name[self].verify_distinct, parameter[list[[<ast.Subscript object at 0x7da1b03a8c10>]]]] | keyword[def] identifier[p_gate_op_5] ( identifier[self] , identifier[program] ):
literal[string]
identifier[program] [ literal[int] ]= identifier[node] . identifier[Barrier] ([ identifier[program] [ literal[int] ]])
identifier[self] . identifier[verify_bit_list] ( identifier[program] [ literal[int] ])
identifier[self] . identifier[verify_distinct] ([ identifier[program] [ literal[int] ]]) | def p_gate_op_5(self, program):
"""
gate_op : BARRIER id_list ';'
"""
program[0] = node.Barrier([program[2]])
self.verify_bit_list(program[2])
self.verify_distinct([program[2]]) |
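The handler above follows the PLY yacc convention: the docstring carries the grammar production, `program[1]`, `program[2]`, ... index the matched symbols, and assigning to `program[0]` sets the rule's semantic value. A generic sketch of that convention, using a hypothetical arithmetic rule rather than the OPENQASM grammar:

```python
def p_expression_plus(p):
    """expression : expression '+' term"""
    # p[1] and p[3] hold the values of the two sub-rules; p[2] is the '+' token.
    p[0] = p[1] + p[3]
```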
def systemCTypeOfSig(signalItem):
"""
Check whether the signal is a register or a wire
"""
if signalItem._const or\
arr_any(signalItem.drivers,
lambda d: isinstance(d, HdlStatement)
and d._now_is_event_dependent):
return SIGNAL_TYPE.REG
else:
return SIGNAL_TYPE.WIRE | def function[systemCTypeOfSig, parameter[signalItem]]:
constant[
Check whether the signal is a register or a wire
]
if <ast.BoolOp object at 0x7da1b05c6d40> begin[:]
return[name[SIGNAL_TYPE].REG] | keyword[def] identifier[systemCTypeOfSig] ( identifier[signalItem] ):
literal[string]
keyword[if] identifier[signalItem] . identifier[_const] keyword[or] identifier[arr_any] ( identifier[signalItem] . identifier[drivers] ,
keyword[lambda] identifier[d] : identifier[isinstance] ( identifier[d] , identifier[HdlStatement] )
keyword[and] identifier[d] . identifier[_now_is_event_dependent] ):
keyword[return] identifier[SIGNAL_TYPE] . identifier[REG]
keyword[else] :
keyword[return] identifier[SIGNAL_TYPE] . identifier[WIRE] | def systemCTypeOfSig(signalItem):
"""
Check whether the signal is a register or a wire
"""
if signalItem._const or arr_any(signalItem.drivers, lambda d: isinstance(d, HdlStatement) and d._now_is_event_dependent):
return SIGNAL_TYPE.REG # depends on [control=['if'], data=[]]
else:
return SIGNAL_TYPE.WIRE |
def constant(func):
"""Decorate a function so that the result is a constant value.
Functions wrapped by this decorator will be run just one time.
The computational result will be stored and reused for any other input.
To store each result for each input, use :func:`memoized` instead.
"""
@wraps(func)
def _(*args, **kwargs):
"""The decorated function.
"""
if not _.res:
_.res = func(*args, **kwargs)
return _.res
_.res = None
return _ | def function[constant, parameter[func]]:
constant[Decorate a function so that the result is a constant value.
Functions wrapped by this decorator will be run just one time.
The computational result will be stored and reused for any other input.
To store each result for each input, use :func:`memoized` instead.
]
def function[_, parameter[]]:
constant[The decorated function.
]
if <ast.UnaryOp object at 0x7da1b228e8c0> begin[:]
name[_].res assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b228e6e0>]]
return[name[_].res]
name[_].res assign[=] constant[None]
return[name[_]] | keyword[def] identifier[constant] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[_] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[_] . identifier[res] :
identifier[_] . identifier[res] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[_] . identifier[res]
identifier[_] . identifier[res] = keyword[None]
keyword[return] identifier[_] | def constant(func):
"""Decorate a function so that the result is a constant value.
Functions wrapped by this decorator will be run just one time.
The computational result will be stored and reused for any other input.
To store each result for each input, use :func:`memoized` instead.
"""
@wraps(func)
def _(*args, **kwargs):
"""The decorated function.
"""
if not _.res:
_.res = func(*args, **kwargs) # depends on [control=['if'], data=[]]
return _.res
_.res = None
return _ |
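A usage sketch for the `constant` decorator above: the first call computes and stores the result, and every later call returns that stored value, even with different arguments. Note the guard is `if not _.res`, so a falsy result would be recomputed:

```python
from functools import wraps  # needed by the decorator above

@constant
def load_config(path):
    print("loading", path)
    return {"path": path}

a = load_config("one.ini")  # prints "loading one.ini"
b = load_config("two.ini")  # no print: the stored result is reused
assert a is b
```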
def get_var(self, key):
'Retrieve one saved variable from the database.'
vt = quote(self.__vars_table)
data = self.execute(u'SELECT * FROM %s WHERE `key` = ?' % vt, [key], commit = False)
if data == []:
raise NameError(u'The DumpTruck variables table doesn\'t have a value for %s.' % key)
else:
tmp = quote(self.__vars_table_tmp)
row = data[0]
self.execute(u'DROP TABLE IF EXISTS %s' % tmp, commit = False)
# This is vulnerable to injection
self.execute(u'CREATE TEMPORARY TABLE %s (`value` %s)' % (tmp, row['type']), commit = False)
# This is ugly
self.execute(u'INSERT INTO %s (`value`) VALUES (?)' % tmp, [row['value']], commit = False)
value = self.dump(tmp)[0]['value']
self.execute(u'DROP TABLE %s' % tmp, commit = False)
return value | def function[get_var, parameter[self, key]]:
constant[Retrieve one saved variable from the database.]
variable[vt] assign[=] call[name[quote], parameter[name[self].__vars_table]]
variable[data] assign[=] call[name[self].execute, parameter[binary_operation[constant[SELECT * FROM %s WHERE `key` = ?] <ast.Mod object at 0x7da2590d6920> name[vt]], list[[<ast.Name object at 0x7da20c9927a0>]]]]
if compare[name[data] equal[==] list[[]]] begin[:]
<ast.Raise object at 0x7da20c993bb0> | keyword[def] identifier[get_var] ( identifier[self] , identifier[key] ):
literal[string]
identifier[vt] = identifier[quote] ( identifier[self] . identifier[__vars_table] )
identifier[data] = identifier[self] . identifier[execute] ( literal[string] % identifier[vt] ,[ identifier[key] ], identifier[commit] = keyword[False] )
keyword[if] identifier[data] ==[]:
keyword[raise] identifier[NameError] ( literal[string] % identifier[key] )
keyword[else] :
identifier[tmp] = identifier[quote] ( identifier[self] . identifier[__vars_table_tmp] )
identifier[row] = identifier[data] [ literal[int] ]
identifier[self] . identifier[execute] ( literal[string] % identifier[tmp] , identifier[commit] = keyword[False] )
identifier[self] . identifier[execute] ( literal[string] %( identifier[tmp] , identifier[row] [ literal[string] ]), identifier[commit] = keyword[False] )
identifier[self] . identifier[execute] ( literal[string] % identifier[tmp] ,[ identifier[row] [ literal[string] ]], identifier[commit] = keyword[False] )
identifier[value] = identifier[self] . identifier[dump] ( identifier[tmp] )[ literal[int] ][ literal[string] ]
identifier[self] . identifier[execute] ( literal[string] % identifier[tmp] , identifier[commit] = keyword[False] )
keyword[return] identifier[value] | def get_var(self, key):
"""Retrieve one saved variable from the database."""
vt = quote(self.__vars_table)
data = self.execute(u'SELECT * FROM %s WHERE `key` = ?' % vt, [key], commit=False)
if data == []:
raise NameError(u"The DumpTruck variables table doesn't have a value for %s." % key) # depends on [control=['if'], data=[]]
else:
tmp = quote(self.__vars_table_tmp)
row = data[0]
self.execute(u'DROP TABLE IF EXISTS %s' % tmp, commit=False)
# This is vulnerable to injection
self.execute(u'CREATE TEMPORARY TABLE %s (`value` %s)' % (tmp, row['type']), commit=False)
# This is ugly
self.execute(u'INSERT INTO %s (`value`) VALUES (?)' % tmp, [row['value']], commit=False)
value = self.dump(tmp)[0]['value']
self.execute(u'DROP TABLE %s' % tmp, commit=False)
return value |
def load_from_cache(**kwargs):
'''
:param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project"
:raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables
:returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object
Searches for a data object in the project cache container matching
the given keyword arguments. If found, the object will be cloned
into the running job's workspace container, and the handler for it
will be returned.
Example::
@dxpy.entry_point('main')
def main(*args, **kwargs):
x = load_from_cache(name="Indexed genome", classname='file')
if x is None:
x = compute_result(*args)
save_to_cache(x)
'''
if 'project' in kwargs:
raise DXError('Unexpected kwarg: "project"')
if dxpy.JOB_ID is None:
raise DXError('Not called by a job')
if 'DX_PROJECT_CACHE_ID' not in os.environ:
raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID')
kwargs['project'] = os.environ.get('DX_PROJECT_CACHE_ID')
kwargs['return_handler'] = True
cached_object = find_one_data_object(**kwargs)
if cached_object is None:
return None
return cached_object.clone(dxpy.WORKSPACE_ID) | def function[load_from_cache, parameter[]]:
constant[
:param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project"
:raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables
:returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object
Searches for a data object in the project cache container matching
the given keyword arguments. If found, the object will be cloned
into the running job's workspace container, and the handler for it
will be returned.
Example::
@dxpy.entry_point('main')
def main(*args, **kwargs):
x = load_from_cache(name="Indexed genome", classname='file')
if x is None:
x = compute_result(*args)
save_to_cache(x)
]
if compare[constant[project] in name[kwargs]] begin[:]
<ast.Raise object at 0x7da204962b00>
if compare[name[dxpy].JOB_ID is constant[None]] begin[:]
<ast.Raise object at 0x7da204960d90>
if compare[constant[DX_PROJECT_CACHE_ID] <ast.NotIn object at 0x7da2590d7190> name[os].environ] begin[:]
<ast.Raise object at 0x7da204960880>
call[name[kwargs]][constant[project]] assign[=] call[name[os].environ.get, parameter[constant[DX_PROJECT_CACHE_ID]]]
call[name[kwargs]][constant[return_handler]] assign[=] constant[True]
variable[cached_object] assign[=] call[name[find_one_data_object], parameter[]]
if compare[name[cached_object] is constant[None]] begin[:]
return[constant[None]]
return[call[name[cached_object].clone, parameter[name[dxpy].WORKSPACE_ID]]] | keyword[def] identifier[load_from_cache] (** identifier[kwargs] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[raise] identifier[DXError] ( literal[string] )
keyword[if] identifier[dxpy] . identifier[JOB_ID] keyword[is] keyword[None] :
keyword[raise] identifier[DXError] ( literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[os] . identifier[environ] :
keyword[raise] identifier[DXError] ( literal[string] )
identifier[kwargs] [ literal[string] ]= identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
identifier[kwargs] [ literal[string] ]= keyword[True]
identifier[cached_object] = identifier[find_one_data_object] (** identifier[kwargs] )
keyword[if] identifier[cached_object] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] identifier[cached_object] . identifier[clone] ( identifier[dxpy] . identifier[WORKSPACE_ID] ) | def load_from_cache(**kwargs):
"""
:param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project"
:raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables
:returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object
Searches for a data object in the project cache container matching
the given keyword arguments. If found, the object will be cloned
into the running job's workspace container, and the handler for it
will be returned.
Example::
@dxpy.entry_point('main')
def main(*args, **kwargs):
x = load_from_cache(name="Indexed genome", classname='file')
if x is None:
x = compute_result(*args)
save_to_cache(x)
"""
if 'project' in kwargs:
raise DXError('Unexpected kwarg: "project"') # depends on [control=['if'], data=[]]
if dxpy.JOB_ID is None:
raise DXError('Not called by a job') # depends on [control=['if'], data=[]]
if 'DX_PROJECT_CACHE_ID' not in os.environ:
raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID') # depends on [control=['if'], data=[]]
kwargs['project'] = os.environ.get('DX_PROJECT_CACHE_ID')
kwargs['return_handler'] = True
cached_object = find_one_data_object(**kwargs)
if cached_object is None:
return None # depends on [control=['if'], data=[]]
return cached_object.clone(dxpy.WORKSPACE_ID) |
def bracket_split(source, brackets=('()', '{}', '[]'), strip=False):
"""DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)"""
starts = [e[0] for e in brackets]
in_bracket = 0
n = 0
last = 0
while n < len(source):
e = source[n]
if not in_bracket and e in starts:
in_bracket = 1
start = n
b_start, b_end = brackets[starts.index(e)]
elif in_bracket:
if e == b_start:
in_bracket += 1
elif e == b_end:
in_bracket -= 1
if not in_bracket:
if source[last:start]:
yield source[last:start]
last = n + 1
yield source[start + strip:n + 1 - strip]
n += 1
if source[last:]:
yield source[last:] | def function[bracket_split, parameter[source, brackets, strip]]:
constant[DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)]
variable[starts] assign[=] <ast.ListComp object at 0x7da20c6e6f80>
variable[in_bracket] assign[=] constant[0]
variable[n] assign[=] constant[0]
variable[last] assign[=] constant[0]
while compare[name[n] less[<] call[name[len], parameter[name[source]]]] begin[:]
variable[e] assign[=] call[name[source]][name[n]]
if <ast.BoolOp object at 0x7da20c6e6c50> begin[:]
variable[in_bracket] assign[=] constant[1]
variable[start] assign[=] name[n]
<ast.Tuple object at 0x7da18c4cc640> assign[=] call[name[brackets]][call[name[starts].index, parameter[name[e]]]]
<ast.AugAssign object at 0x7da20c76e080>
if call[name[source]][<ast.Slice object at 0x7da20c76eef0>] begin[:]
<ast.Yield object at 0x7da20c76d300> | keyword[def] identifier[bracket_split] ( identifier[source] , identifier[brackets] =( literal[string] , literal[string] , literal[string] ), identifier[strip] = keyword[False] ):
literal[string]
identifier[starts] =[ identifier[e] [ literal[int] ] keyword[for] identifier[e] keyword[in] identifier[brackets] ]
identifier[in_bracket] = literal[int]
identifier[n] = literal[int]
identifier[last] = literal[int]
keyword[while] identifier[n] < identifier[len] ( identifier[source] ):
identifier[e] = identifier[source] [ identifier[n] ]
keyword[if] keyword[not] identifier[in_bracket] keyword[and] identifier[e] keyword[in] identifier[starts] :
identifier[in_bracket] = literal[int]
identifier[start] = identifier[n]
identifier[b_start] , identifier[b_end] = identifier[brackets] [ identifier[starts] . identifier[index] ( identifier[e] )]
keyword[elif] identifier[in_bracket] :
keyword[if] identifier[e] == identifier[b_start] :
identifier[in_bracket] += literal[int]
keyword[elif] identifier[e] == identifier[b_end] :
identifier[in_bracket] -= literal[int]
keyword[if] keyword[not] identifier[in_bracket] :
keyword[if] identifier[source] [ identifier[last] : identifier[start] ]:
keyword[yield] identifier[source] [ identifier[last] : identifier[start] ]
identifier[last] = identifier[n] + literal[int]
keyword[yield] identifier[source] [ identifier[start] + identifier[strip] : identifier[n] + literal[int] - identifier[strip] ]
identifier[n] += literal[int]
keyword[if] identifier[source] [ identifier[last] :]:
keyword[yield] identifier[source] [ identifier[last] :] | def bracket_split(source, brackets=('()', '{}', '[]'), strip=False):
"""DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)"""
starts = [e[0] for e in brackets]
in_bracket = 0
n = 0
last = 0
while n < len(source):
e = source[n]
if not in_bracket and e in starts:
in_bracket = 1
start = n
(b_start, b_end) = brackets[starts.index(e)] # depends on [control=['if'], data=[]]
elif in_bracket:
if e == b_start:
in_bracket += 1 # depends on [control=['if'], data=[]]
elif e == b_end:
in_bracket -= 1
if not in_bracket:
if source[last:start]:
yield source[last:start] # depends on [control=['if'], data=[]]
last = n + 1
yield source[start + strip:n + 1 - strip] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
n += 1 # depends on [control=['while'], data=['n']]
if source[last:]:
yield source[last:] # depends on [control=['if'], data=[]] |
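A quick usage sketch for the `bracket_split` generator above: text between bracket groups is yielded as-is, empty segments are skipped, and `strip=True` drops the enclosing brackets:

```python
print(list(bracket_split("f(a,b)+g[c]")))
# -> ['f', '(a,b)', '+g', '[c]']
print(list(bracket_split("f(a,b)+g[c]", strip=True)))
# -> ['f', 'a,b', '+g', 'c']
```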
def iter_tasks(matrix, names, groups):
"""Iterate tasks."""
# Build name index
name_index = dict([(task.get('name', ''), index) for index, task in enumerate(matrix)])
for index, task in enumerate(matrix):
name = task.get('name', '')
group = task.get('group', '')
hidden = task.get('hidden', False)
if names and name in names and index == name_index[name]:
yield task
elif groups and group in groups and not hidden:
yield task
elif not names and not groups and not hidden:
yield task | def function[iter_tasks, parameter[matrix, names, groups]]:
constant[Iterate tasks.]
variable[name_index] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18c4cfb80>]]
for taget[tuple[[<ast.Name object at 0x7da18c4cc3d0>, <ast.Name object at 0x7da18c4cff70>]]] in starred[call[name[enumerate], parameter[name[matrix]]]] begin[:]
variable[name] assign[=] call[name[task].get, parameter[constant[name], constant[]]]
variable[group] assign[=] call[name[task].get, parameter[constant[group], constant[]]]
variable[hidden] assign[=] call[name[task].get, parameter[constant[hidden], constant[False]]]
if <ast.BoolOp object at 0x7da18c4cf7c0> begin[:]
<ast.Yield object at 0x7da18c4ce260> | keyword[def] identifier[iter_tasks] ( identifier[matrix] , identifier[names] , identifier[groups] ):
literal[string]
identifier[name_index] = identifier[dict] ([( identifier[task] . identifier[get] ( literal[string] , literal[string] ), identifier[index] ) keyword[for] identifier[index] , identifier[task] keyword[in] identifier[enumerate] ( identifier[matrix] )])
keyword[for] identifier[index] , identifier[task] keyword[in] identifier[enumerate] ( identifier[matrix] ):
identifier[name] = identifier[task] . identifier[get] ( literal[string] , literal[string] )
identifier[group] = identifier[task] . identifier[get] ( literal[string] , literal[string] )
identifier[hidden] = identifier[task] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[names] keyword[and] identifier[name] keyword[in] identifier[names] keyword[and] identifier[index] == identifier[name_index] [ identifier[name] ]:
keyword[yield] identifier[task]
keyword[elif] identifier[groups] keyword[and] identifier[group] keyword[in] identifier[groups] keyword[and] keyword[not] identifier[hidden] :
keyword[yield] identifier[task]
keyword[elif] keyword[not] identifier[names] keyword[and] keyword[not] identifier[groups] keyword[and] keyword[not] identifier[hidden] :
keyword[yield] identifier[task] | def iter_tasks(matrix, names, groups):
"""Iterate tasks."""
# Build name index
name_index = dict([(task.get('name', ''), index) for (index, task) in enumerate(matrix)])
for (index, task) in enumerate(matrix):
name = task.get('name', '')
group = task.get('group', '')
hidden = task.get('hidden', False)
if names and name in names and (index == name_index[name]):
yield task # depends on [control=['if'], data=[]]
elif groups and group in groups and (not hidden):
yield task # depends on [control=['if'], data=[]]
elif not names and (not groups) and (not hidden):
yield task # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
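A usage sketch for `iter_tasks` with a hypothetical task matrix: with no filters only non-hidden tasks are yielded, while a name filter selects the last task carrying that name (the name index keeps the final position per name) and ignores the hidden flag:

```python
matrix = [
    {'name': 'py27', 'group': 'python'},
    {'name': 'py36', 'group': 'python'},
    {'name': 'lint', 'group': 'qa', 'hidden': True},
]
print([t['name'] for t in iter_tasks(matrix, names=[], groups=[])])
# -> ['py27', 'py36']  (hidden tasks are skipped)
print([t['name'] for t in iter_tasks(matrix, names=['lint'], groups=[])])
# -> ['lint']  (an explicit name overrides hidden)
```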
def set_xlim(self, xlim):
'''set new X bounds'''
if self.xlim_pipe is not None and self.xlim != xlim:
#print("send0: ", graph_count, xlim)
try:
self.xlim_pipe[0].send(xlim)
except IOError:
return False
self.xlim = xlim
return True | def function[set_xlim, parameter[self, xlim]]:
constant[set new X bounds]
if <ast.BoolOp object at 0x7da20c76fd30> begin[:]
<ast.Try object at 0x7da20c76e3e0>
name[self].xlim assign[=] name[xlim]
return[constant[True]] | keyword[def] identifier[set_xlim] ( identifier[self] , identifier[xlim] ):
literal[string]
keyword[if] identifier[self] . identifier[xlim_pipe] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[xlim] != identifier[xlim] :
keyword[try] :
identifier[self] . identifier[xlim_pipe] [ literal[int] ]. identifier[send] ( identifier[xlim] )
keyword[except] identifier[IOError] :
keyword[return] keyword[False]
identifier[self] . identifier[xlim] = identifier[xlim]
keyword[return] keyword[True] | def set_xlim(self, xlim):
"""set new X bounds"""
if self.xlim_pipe is not None and self.xlim != xlim:
#print("send0: ", graph_count, xlim)
try:
self.xlim_pipe[0].send(xlim) # depends on [control=['try'], data=[]]
except IOError:
return False # depends on [control=['except'], data=[]]
self.xlim = xlim # depends on [control=['if'], data=[]]
return True |
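`xlim_pipe` above is treated as a pair of connection endpoints where index 0 is the sending end; the plotting process presumably reads from the other end. A minimal sketch of that pattern with `multiprocessing.Pipe` (the tuple layout is an assumption):

```python
from multiprocessing import Pipe

conn_a, conn_b = Pipe()
xlim_pipe = (conn_a, conn_b)  # assumed layout: [0] sends, [1] receives

xlim_pipe[0].send((0.0, 10.0))
print(xlim_pipe[1].recv())    # -> (0.0, 10.0)
```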
def _format_report_line(self, test, time_taken, color, status, percent):
"""Format a single report line."""
return "[{0}] {3:04.2f}% {1}: {2}".format(
status, test, self._colored_time(time_taken, color), percent
) | def function[_format_report_line, parameter[self, test, time_taken, color, status, percent]]:
constant[Format a single report line.]
return[call[constant[[{0}] {3:04.2f}% {1}: {2}].format, parameter[name[status], name[test], call[name[self]._colored_time, parameter[name[time_taken], name[color]]], name[percent]]]] | keyword[def] identifier[_format_report_line] ( identifier[self] , identifier[test] , identifier[time_taken] , identifier[color] , identifier[status] , identifier[percent] ):
literal[string]
keyword[return] literal[string] . identifier[format] (
identifier[status] , identifier[test] , identifier[self] . identifier[_colored_time] ( identifier[time_taken] , identifier[color] ), identifier[percent]
) | def _format_report_line(self, test, time_taken, color, status, percent):
"""Format a single report line."""
return '[{0}] {3:04.2f}% {1}: {2}'.format(status, test, self._colored_time(time_taken, color), percent) |
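The format string above, shown standalone: field 3 (the percentage) is rendered zero-padded to width 4 with two decimals, and a plain string stands in for the colored time produced by `self._colored_time`:

```python
line = "[{0}] {3:04.2f}% {1}: {2}".format("OK", "test_login", "0.12s", 3.5)
print(line)  # -> [OK] 3.50% test_login: 0.12s
```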
def random_density_matrix(length, rank=None, method='Hilbert-Schmidt', seed=None):
"""Deprecated in 0.8+
"""
warnings.warn('The random_density_matrix() function in qiskit.tools.qi has been '
'deprecated and will be removed in the future. Instead use '
'the function in qiskit.quantum_info.random',
DeprecationWarning)
return random.random_density_matrix(length, rank, method, seed) | def function[random_density_matrix, parameter[length, rank, method, seed]]:
constant[Deprecated in 0.8+
]
call[name[warnings].warn, parameter[constant[The random_density_matrix() function in qiskit.tools.qi has been deprecated and will be removed in the future. Instead use the function in qiskit.quantum_info.random], name[DeprecationWarning]]]
return[call[name[random].random_density_matrix, parameter[name[length], name[rank], name[method], name[seed]]]] | keyword[def] identifier[random_density_matrix] ( identifier[length] , identifier[rank] = keyword[None] , identifier[method] = literal[string] , identifier[seed] = keyword[None] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string] ,
identifier[DeprecationWarning] )
keyword[return] identifier[random] . identifier[random_density_matrix] ( identifier[length] , identifier[rank] , identifier[method] , identifier[seed] ) | def random_density_matrix(length, rank=None, method='Hilbert-Schmidt', seed=None):
"""Deprecated in 0.8+
"""
warnings.warn('The random_density_matrix() function in qiskit.tools.qi has been deprecated and will be removed in the future. Instead use the function in qiskit.quantum_info.random', DeprecationWarning)
return random.random_density_matrix(length, rank, method, seed) |
def polarization_vector(phi, theta, alpha, beta, p,
numeric=False, abstract=False):
"""This function returns a unitary vector describing the polarization
of plane waves.:
INPUT:
- ``phi`` - The spherical coordinates azimuthal angle of the wave vector\
k.
- ``theta`` - The spherical coordinates polar angle of the wave vector k.
- ``alpha`` - The rotation of a half-wave plate.
- ``beta`` - The rotation of a quarter-wave plate.
- ``p`` - either 1 or -1 to indicate whether to return epsilon^(+) or\
epsilon^(-) respectively.
If alpha and beta are zero, the result will be linearly polarized light
along some fast axis. alpha and beta are measured from that fast axis.
Propagation towards y, linear polarization (for pi transitions):
>>> from sympy import pi
>>> polarization_vector(phi=pi/2, theta=pi/2, alpha=pi/2, beta= 0,p=1)
Matrix([
[0],
[0],
[1]])
Propagation towards +z, circular polarization (for sigma + transitions):
>>> polarization_vector(phi=0, theta= 0, alpha=pi/2, beta= pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Propagation towards -z, circular polarization for sigma + transitions:
>>> polarization_vector(phi=0, theta=pi, alpha= 0, beta=-pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Components + and - are complex conjugates of each other
>>> from sympy import symbols
>>> phi, theta, alpha, beta = symbols("phi theta alpha beta", real=True)
>>> ep = polarization_vector(phi,theta,alpha,beta, 1)
>>> em = polarization_vector(phi,theta,alpha,beta,-1)
>>> ep-em.conjugate()
Matrix([
[0],
[0],
[0]])
We can also define abstract polarization vectors without explicit \
components
>>> polarization_vector(0, 0, 0, 0, 1, abstract=True)
epsilonp
>>> polarization_vector(0, 0, 0, 0, -1, abstract=True)
epsilonm
"""
if abstract:
Nl = symbols("N_l", integer=True)
if p == 1:
epsilon = Vector3D(IndexedBase("epsilonp", shape=(Nl,)))
else:
epsilon = Vector3D(IndexedBase("epsilonm", shape=(Nl,)))
return epsilon
epsilon = Matrix([cos(2*beta), p*I*sin(2*beta), 0])
R1 = Matrix([[cos(2*alpha), -sin(2*alpha), 0],
[sin(2*alpha), cos(2*alpha), 0],
[0, 0, 1]])
R2 = Matrix([[cos(theta), 0, sin(theta)],
[0, 1, 0],
[-sin(theta), 0, cos(theta)]])
R3 = Matrix([[cos(phi), -sin(phi), 0],
[sin(phi), cos(phi), 0],
[0, 0, 1]])
epsilon = R3*R2*R1*epsilon
if numeric:
epsilon = nparray([complex(epsilon[i]) for i in range(3)])
return epsilon | def function[polarization_vector, parameter[phi, theta, alpha, beta, p, numeric, abstract]]:
constant[This function returns a unit vector describing the polarization
of plane waves.:
INPUT:
- ``phi`` - The spherical coordinates azimuthal angle of the wave vector k.
- ``theta`` - The spherical coordinates polar angle of the wave vector k.
- ``alpha`` - The rotation of a half-wave plate.
- ``beta`` - The rotation of a quarter-wave plate.
- ``p`` - either 1 or -1 to indicate whether to return epsilon^(+) or epsilon^(-) respectively.
If alpha and beta are zero, the result will be linearly polarized light
along some fast axis. alpha and beta are measured from that fast axis.
Propagation towards y, linear polarization (for pi transitions):
>>> from sympy import pi
>>> polarization_vector(phi=pi/2, theta=pi/2, alpha=pi/2, beta= 0,p=1)
Matrix([
[0],
[0],
[1]])
Propagation towards +z, circular polarization (for sigma + transitions):
>>> polarization_vector(phi=0, theta= 0, alpha=pi/2, beta= pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Propagation towards -z, circular polarization for sigma + transitions:
>>> polarization_vector(phi=0, theta=pi, alpha= 0, beta=-pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Components + and - are complex conjugates of each other
>>> from sympy import symbols
>>> phi, theta, alpha, beta = symbols("phi theta alpha beta", real=True)
>>> ep = polarization_vector(phi,theta,alpha,beta, 1)
>>> em = polarization_vector(phi,theta,alpha,beta,-1)
>>> ep-em.conjugate()
Matrix([
[0],
[0],
[0]])
We can also define abstract polarization vectors without explicit components
>>> polarization_vector(0, 0, 0, 0, 1, abstract=True)
epsilonp
>>> polarization_vector(0, 0, 0, 0, -1, abstract=True)
epsilonm
]
if name[abstract] begin[:]
variable[Nl] assign[=] call[name[symbols], parameter[constant[N_l]]]
if compare[name[p] equal[==] constant[1]] begin[:]
variable[epsilon] assign[=] call[name[Vector3D], parameter[call[name[IndexedBase], parameter[constant[epsilonp]]]]]
return[name[epsilon]]
variable[epsilon] assign[=] call[name[Matrix], parameter[list[[<ast.Call object at 0x7da20c6aa620>, <ast.BinOp object at 0x7da20c6abbb0>, <ast.Constant object at 0x7da20c6a9780>]]]]
variable[R1] assign[=] call[name[Matrix], parameter[list[[<ast.List object at 0x7da20c6a81f0>, <ast.List object at 0x7da18ede60b0>, <ast.List object at 0x7da18ede4b80>]]]]
variable[R2] assign[=] call[name[Matrix], parameter[list[[<ast.List object at 0x7da18ede7580>, <ast.List object at 0x7da18ede4970>, <ast.List object at 0x7da18f58d150>]]]]
variable[R3] assign[=] call[name[Matrix], parameter[list[[<ast.List object at 0x7da18f58d6c0>, <ast.List object at 0x7da18f58db10>, <ast.List object at 0x7da18f58ec50>]]]]
variable[epsilon] assign[=] binary_operation[binary_operation[binary_operation[name[R3] * name[R2]] * name[R1]] * name[epsilon]]
if name[numeric] begin[:]
variable[epsilon] assign[=] call[name[nparray], parameter[<ast.ListComp object at 0x7da18f58ed10>]]
return[name[epsilon]] | keyword[def] identifier[polarization_vector] ( identifier[phi] , identifier[theta] , identifier[alpha] , identifier[beta] , identifier[p] ,
identifier[numeric] = keyword[False] , identifier[abstract] = keyword[False] ):
literal[string]
keyword[if] identifier[abstract] :
identifier[Nl] = identifier[symbols] ( literal[string] , identifier[integer] = keyword[True] )
keyword[if] identifier[p] == literal[int] :
identifier[epsilon] = identifier[Vector3D] ( identifier[IndexedBase] ( literal[string] , identifier[shape] =( identifier[Nl] ,)))
keyword[else] :
identifier[epsilon] = identifier[Vector3D] ( identifier[IndexedBase] ( literal[string] , identifier[shape] =( identifier[Nl] ,)))
keyword[return] identifier[epsilon]
identifier[epsilon] = identifier[Matrix] ([ identifier[cos] ( literal[int] * identifier[beta] ), identifier[p] * identifier[I] * identifier[sin] ( literal[int] * identifier[beta] ), literal[int] ])
identifier[R1] = identifier[Matrix] ([[ identifier[cos] ( literal[int] * identifier[alpha] ),- identifier[sin] ( literal[int] * identifier[alpha] ), literal[int] ],
[ identifier[sin] ( literal[int] * identifier[alpha] ), identifier[cos] ( literal[int] * identifier[alpha] ), literal[int] ],
[ literal[int] , literal[int] , literal[int] ]])
identifier[R2] = identifier[Matrix] ([[ identifier[cos] ( identifier[theta] ), literal[int] , identifier[sin] ( identifier[theta] )],
[ literal[int] , literal[int] , literal[int] ],
[- identifier[sin] ( identifier[theta] ), literal[int] , identifier[cos] ( identifier[theta] )]])
identifier[R3] = identifier[Matrix] ([[ identifier[cos] ( identifier[phi] ),- identifier[sin] ( identifier[phi] ), literal[int] ],
[ identifier[sin] ( identifier[phi] ), identifier[cos] ( identifier[phi] ), literal[int] ],
[ literal[int] , literal[int] , literal[int] ]])
identifier[epsilon] = identifier[R3] * identifier[R2] * identifier[R1] * identifier[epsilon]
keyword[if] identifier[numeric] :
identifier[epsilon] = identifier[nparray] ([ identifier[complex] ( identifier[epsilon] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )])
keyword[return] identifier[epsilon] | def polarization_vector(phi, theta, alpha, beta, p, numeric=False, abstract=False):
"""This function returns a unitary vector describing the polarization
of plane waves.:
INPUT:
- ``phi`` - The spherical coordinates azimuthal angle of the wave vector k.
- ``theta`` - The spherical coordinates polar angle of the wave vector k.
- ``alpha`` - The rotation of a half-wave plate.
- ``beta`` - The rotation of a quarter-wave plate.
- ``p`` - either 1 or -1 to indicate whether to return epsilon^(+) or epsilon^(-) respectively.
If alpha and beta are zero, the result will be linearly polarized light
along some fast axis. alpha and beta are measured from that fast axis.
Propagation towards y, linear polarization (for pi transitions):
>>> from sympy import pi
>>> polarization_vector(phi=pi/2, theta=pi/2, alpha=pi/2, beta= 0,p=1)
Matrix([
[0],
[0],
[1]])
Propagation towards +z, circular polarization (for sigma + transitions):
>>> polarization_vector(phi=0, theta= 0, alpha=pi/2, beta= pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Propagation towards -z, circular polarization for sigma + transitions:
>>> polarization_vector(phi=0, theta=pi, alpha= 0, beta=-pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Components + and - are complex conjugates of each other
>>> from sympy import symbols
>>> phi, theta, alpha, beta = symbols("phi theta alpha beta", real=True)
>>> ep = polarization_vector(phi,theta,alpha,beta, 1)
>>> em = polarization_vector(phi,theta,alpha,beta,-1)
>>> ep-em.conjugate()
Matrix([
[0],
[0],
[0]])
We can also define abstract polarization vectors without explicit components
>>> polarization_vector(0, 0, 0, 0, 1, abstract=True)
epsilonp
>>> polarization_vector(0, 0, 0, 0, -1, abstract=True)
epsilonm
"""
if abstract:
Nl = symbols('N_l', integer=True)
if p == 1:
epsilon = Vector3D(IndexedBase('epsilonp', shape=(Nl,))) # depends on [control=['if'], data=[]]
else:
epsilon = Vector3D(IndexedBase('epsilonm', shape=(Nl,)))
return epsilon # depends on [control=['if'], data=[]]
epsilon = Matrix([cos(2 * beta), p * I * sin(2 * beta), 0])
R1 = Matrix([[cos(2 * alpha), -sin(2 * alpha), 0], [sin(2 * alpha), cos(2 * alpha), 0], [0, 0, 1]])
R2 = Matrix([[cos(theta), 0, sin(theta)], [0, 1, 0], [-sin(theta), 0, cos(theta)]])
R3 = Matrix([[cos(phi), -sin(phi), 0], [sin(phi), cos(phi), 0], [0, 0, 1]])
epsilon = R3 * R2 * R1 * epsilon
if numeric:
epsilon = nparray([complex(epsilon[i]) for i in range(3)]) # depends on [control=['if'], data=[]]
return epsilon |
def find_lt(a, x):
"""Find rightmost value less than x"""
i = bisect.bisect_left(a, x)
if i:
return a[i-1]
raise ValueError | def function[find_lt, parameter[a, x]]:
constant[Find rightmost value less than x]
variable[i] assign[=] call[name[bisect].bisect_left, parameter[name[a], name[x]]]
if name[i] begin[:]
return[call[name[a]][binary_operation[name[i] - constant[1]]]]
<ast.Raise object at 0x7da20c991330> | keyword[def] identifier[find_lt] ( identifier[a] , identifier[x] ):
literal[string]
identifier[i] = identifier[bisect] . identifier[bisect_left] ( identifier[a] , identifier[x] )
keyword[if] identifier[i] :
keyword[return] identifier[a] [ identifier[i] - literal[int] ]
keyword[raise] identifier[ValueError] | def find_lt(a, x):
"""Find rightmost value less than x"""
i = bisect.bisect_left(a, x)
if i:
return a[i - 1] # depends on [control=['if'], data=[]]
raise ValueError |
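Usage sketch for the `find_lt` helper above: `bisect_left` locates the insertion point for `x`, so the element just before it is the rightmost value strictly less than `x`:

```python
import bisect

a = [1, 3, 5, 7]
print(find_lt(a, 6))  # -> 5 (insertion point is 3, so a[2] is returned)
# find_lt(a, 1) raises ValueError: nothing in a is less than 1
```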
def sort(self):
"""
Sort triggers and their associated responses
"""
# Sort triggers by word and character length first
for priority, triggers in self._triggers.items():
self._log.debug('Sorting priority {priority} triggers'.format(priority=priority))
# Get and sort our atomic and wildcard patterns
atomics = [trigger for trigger in triggers if trigger.pattern_is_atomic]
wildcards = [trigger for trigger in triggers if not trigger.pattern_is_atomic]
atomics = sorted(atomics, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len), reverse=True)
wildcards = sorted(wildcards, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len),
reverse=True)
# Replace our sorted triggers
self._triggers[priority] = atomics + wildcards
# Finally, sort triggers by priority
self._sorted_triggers = []
for triggers in [self._triggers[priority] for priority in sorted(self._triggers.keys(), reverse=True)]:
for trigger in triggers:
self._sorted_triggers.append(trigger)
self.sorted = True | def function[sort, parameter[self]]:
constant[
Sort triggers and their associated responses
]
for taget[tuple[[<ast.Name object at 0x7da1b164bbe0>, <ast.Name object at 0x7da1b1648970>]]] in starred[call[name[self]._triggers.items, parameter[]]] begin[:]
call[name[self]._log.debug, parameter[call[constant[Sorting priority {priority} triggers].format, parameter[]]]]
variable[atomics] assign[=] <ast.ListComp object at 0x7da1b1648a00>
variable[wildcards] assign[=] <ast.ListComp object at 0x7da1b1648e80>
variable[atomics] assign[=] call[name[sorted], parameter[name[atomics]]]
variable[wildcards] assign[=] call[name[sorted], parameter[name[wildcards]]]
call[name[self]._triggers][name[priority]] assign[=] binary_operation[name[atomics] + name[wildcards]]
name[self]._sorted_triggers assign[=] list[[]]
for taget[name[triggers]] in starred[<ast.ListComp object at 0x7da1b1454c40>] begin[:]
for taget[name[trigger]] in starred[name[triggers]] begin[:]
call[name[self]._sorted_triggers.append, parameter[name[trigger]]]
name[self].sorted assign[=] constant[True] | keyword[def] identifier[sort] ( identifier[self] ):
literal[string]
keyword[for] identifier[priority] , identifier[triggers] keyword[in] identifier[self] . identifier[_triggers] . identifier[items] ():
identifier[self] . identifier[_log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[priority] = identifier[priority] ))
identifier[atomics] =[ identifier[trigger] keyword[for] identifier[trigger] keyword[in] identifier[triggers] keyword[if] identifier[trigger] . identifier[pattern_is_atomic] ]
identifier[wildcards] =[ identifier[trigger] keyword[for] identifier[trigger] keyword[in] identifier[triggers] keyword[if] keyword[not] identifier[trigger] . identifier[pattern_is_atomic] ]
identifier[atomics] = identifier[sorted] ( identifier[atomics] , identifier[key] = keyword[lambda] identifier[trigger] :( identifier[trigger] . identifier[pattern_words] , identifier[trigger] . identifier[pattern_len] ), identifier[reverse] = keyword[True] )
identifier[wildcards] = identifier[sorted] ( identifier[wildcards] , identifier[key] = keyword[lambda] identifier[trigger] :( identifier[trigger] . identifier[pattern_words] , identifier[trigger] . identifier[pattern_len] ),
identifier[reverse] = keyword[True] )
identifier[self] . identifier[_triggers] [ identifier[priority] ]= identifier[atomics] + identifier[wildcards]
identifier[self] . identifier[_sorted_triggers] =[]
keyword[for] identifier[triggers] keyword[in] [ identifier[self] . identifier[_triggers] [ identifier[priority] ] keyword[for] identifier[priority] keyword[in] identifier[sorted] ( identifier[self] . identifier[_triggers] . identifier[keys] (), identifier[reverse] = keyword[True] )]:
keyword[for] identifier[trigger] keyword[in] identifier[triggers] :
identifier[self] . identifier[_sorted_triggers] . identifier[append] ( identifier[trigger] )
identifier[self] . identifier[sorted] = keyword[True] | def sort(self):
"""
Sort triggers and their associated responses
"""
# Sort triggers by word and character length first
for (priority, triggers) in self._triggers.items():
self._log.debug('Sorting priority {priority} triggers'.format(priority=priority))
# Get and sort our atomic and wildcard patterns
atomics = [trigger for trigger in triggers if trigger.pattern_is_atomic]
wildcards = [trigger for trigger in triggers if not trigger.pattern_is_atomic]
atomics = sorted(atomics, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len), reverse=True)
wildcards = sorted(wildcards, key=lambda trigger: (trigger.pattern_words, trigger.pattern_len), reverse=True)
# Replace our sorted triggers
self._triggers[priority] = atomics + wildcards # depends on [control=['for'], data=[]]
# Finally, sort triggers by priority
self._sorted_triggers = []
for triggers in [self._triggers[priority] for priority in sorted(self._triggers.keys(), reverse=True)]:
for trigger in triggers:
self._sorted_triggers.append(trigger) # depends on [control=['for'], data=['trigger']] # depends on [control=['for'], data=['triggers']]
self.sorted = True |
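The ordering rule in `sort` reduces to two `sorted` passes with a descending `(words, length)` key, atomic patterns placed ahead of wildcards. A sketch of that rule using hypothetical `(pattern, words, length, atomic)` tuples instead of real trigger objects:

```python
triggers = [
    ('hello *', 2, 8, False),
    ('hello world', 2, 11, True),
    ('*', 1, 1, False),
]
atomics = sorted([t for t in triggers if t[3]], key=lambda t: (t[1], t[2]), reverse=True)
wildcards = sorted([t for t in triggers if not t[3]], key=lambda t: (t[1], t[2]), reverse=True)
print([t[0] for t in atomics + wildcards])
# -> ['hello world', 'hello *', '*']
```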
def handle(self, key, value):
'''
Processes a valid action info request
@param key: The key that matched the request
@param value: The value associated with the key
'''
# break down key
elements = key.split(":")
if len(elements) != 4:
self.logger.warn("Stop requests need a crawlid and appid")
return
spiderid = elements[1]
appid = elements[2]
crawlid = elements[3]
uuid = value
# log we received the stop message
extras = self.get_log_dict('stop', appid,
spiderid, uuid, crawlid)
self.logger.info('Received stop request', extra=extras)
redis_key = spiderid + ":blacklist"
value = '{appid}||{crawlid}'.format(appid=appid,
crawlid=crawlid)
# add this to the blacklist set
self.redis_conn.sadd(redis_key, value)
# purge crawlid from current set
result = self._purge_crawl(spiderid, appid, crawlid)
# item to send to kafka
extras = {}
extras['action'] = "stop"
extras['spiderid'] = spiderid
extras['appid'] = appid
extras['crawlid'] = crawlid
extras['total_purged'] = result
extras['uuid'] = uuid
extras['server_time'] = int(self.get_current_time())
if self._send_to_kafka(extras):
# delete timeout for crawl (if needed) since stopped
timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(
sid=spiderid,
aid=appid,
cid=crawlid)
self.redis_conn.delete(timeout_key)
extras['success'] = True
self.logger.info('Sent stop ack to kafka', extra=extras)
else:
extras['success'] = False
self.logger.error('Failed to send stop ack to kafka', extra=extras) | def function[handle, parameter[self, key, value]]:
constant[
Processes a valid action info request
@param key: The key that matched the request
@param value: The value associated with the key
]
variable[elements] assign[=] call[name[key].split, parameter[constant[:]]]
if compare[call[name[len], parameter[name[elements]]] not_equal[!=] constant[4]] begin[:]
call[name[self].logger.warn, parameter[constant[Stop requests need a crawlid and appid]]]
return[None]
variable[spiderid] assign[=] call[name[elements]][constant[1]]
variable[appid] assign[=] call[name[elements]][constant[2]]
variable[crawlid] assign[=] call[name[elements]][constant[3]]
variable[uuid] assign[=] name[value]
variable[extras] assign[=] call[name[self].get_log_dict, parameter[constant[stop], name[appid], name[spiderid], name[uuid], name[crawlid]]]
call[name[self].logger.info, parameter[constant[Received stop request]]]
variable[redis_key] assign[=] binary_operation[name[spiderid] + constant[:blacklist]]
variable[value] assign[=] call[constant[{appid}||{crawlid}].format, parameter[]]
call[name[self].redis_conn.sadd, parameter[name[redis_key], name[value]]]
variable[result] assign[=] call[name[self]._purge_crawl, parameter[name[spiderid], name[appid], name[crawlid]]]
variable[extras] assign[=] dictionary[[], []]
call[name[extras]][constant[action]] assign[=] constant[stop]
call[name[extras]][constant[spiderid]] assign[=] name[spiderid]
call[name[extras]][constant[appid]] assign[=] name[appid]
call[name[extras]][constant[crawlid]] assign[=] name[crawlid]
call[name[extras]][constant[total_purged]] assign[=] name[result]
call[name[extras]][constant[uuid]] assign[=] name[uuid]
call[name[extras]][constant[server_time]] assign[=] call[name[int], parameter[call[name[self].get_current_time, parameter[]]]]
if call[name[self]._send_to_kafka, parameter[name[extras]]] begin[:]
variable[timeout_key] assign[=] call[constant[timeout:{sid}:{aid}:{cid}].format, parameter[]]
call[name[self].redis_conn.delete, parameter[name[timeout_key]]]
call[name[extras]][constant[success]] assign[=] constant[True]
call[name[self].logger.info, parameter[constant[Sent stop ack to kafka]]] | keyword[def] identifier[handle] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[elements] = identifier[key] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[elements] )!= literal[int] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] )
keyword[return]
identifier[spiderid] = identifier[elements] [ literal[int] ]
identifier[appid] = identifier[elements] [ literal[int] ]
identifier[crawlid] = identifier[elements] [ literal[int] ]
identifier[uuid] = identifier[value]
identifier[extras] = identifier[self] . identifier[get_log_dict] ( literal[string] , identifier[appid] ,
identifier[spiderid] , identifier[uuid] , identifier[crawlid] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[extra] = identifier[extras] )
identifier[redis_key] = identifier[spiderid] + literal[string]
identifier[value] = literal[string] . identifier[format] ( identifier[appid] = identifier[appid] ,
identifier[crawlid] = identifier[crawlid] )
identifier[self] . identifier[redis_conn] . identifier[sadd] ( identifier[redis_key] , identifier[value] )
identifier[result] = identifier[self] . identifier[_purge_crawl] ( identifier[spiderid] , identifier[appid] , identifier[crawlid] )
identifier[extras] ={}
identifier[extras] [ literal[string] ]= literal[string]
identifier[extras] [ literal[string] ]= identifier[spiderid]
identifier[extras] [ literal[string] ]= identifier[appid]
identifier[extras] [ literal[string] ]= identifier[crawlid]
identifier[extras] [ literal[string] ]= identifier[result]
identifier[extras] [ literal[string] ]= identifier[uuid]
identifier[extras] [ literal[string] ]= identifier[int] ( identifier[self] . identifier[get_current_time] ())
keyword[if] identifier[self] . identifier[_send_to_kafka] ( identifier[extras] ):
identifier[timeout_key] = literal[string] . identifier[format] (
identifier[sid] = identifier[spiderid] ,
identifier[aid] = identifier[appid] ,
identifier[cid] = identifier[crawlid] )
identifier[self] . identifier[redis_conn] . identifier[delete] ( identifier[timeout_key] )
identifier[extras] [ literal[string] ]= keyword[True]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[extra] = identifier[extras] )
keyword[else] :
identifier[extras] [ literal[string] ]= keyword[False]
identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[extra] = identifier[extras] ) | def handle(self, key, value):
"""
Processes a valid action info request
@param key: The key that matched the request
@param value: The value associated with the key
"""
# break down key
elements = key.split(':')
if len(elements) != 4:
self.logger.warn('Stop requests need a crawlid and appid')
return # depends on [control=['if'], data=[]]
spiderid = elements[1]
appid = elements[2]
crawlid = elements[3]
uuid = value
# log we received the stop message
extras = self.get_log_dict('stop', appid, spiderid, uuid, crawlid)
self.logger.info('Received stop request', extra=extras)
redis_key = spiderid + ':blacklist'
value = '{appid}||{crawlid}'.format(appid=appid, crawlid=crawlid)
# add this to the blacklist set
self.redis_conn.sadd(redis_key, value)
# purge crawlid from current set
result = self._purge_crawl(spiderid, appid, crawlid)
# item to send to kafka
extras = {}
extras['action'] = 'stop'
extras['spiderid'] = spiderid
extras['appid'] = appid
extras['crawlid'] = crawlid
extras['total_purged'] = result
extras['uuid'] = uuid
extras['server_time'] = int(self.get_current_time())
if self._send_to_kafka(extras):
# delete timeout for crawl (if needed) since stopped
timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(sid=spiderid, aid=appid, cid=crawlid)
self.redis_conn.delete(timeout_key)
extras['success'] = True
self.logger.info('Sent stop ack to kafka', extra=extras) # depends on [control=['if'], data=[]]
else:
extras['success'] = False
self.logger.error('Failed to send stop ack to kafka', extra=extras) |
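For orientation, a hedged sketch of driving the handler above; StopMonitor is a hypothetical subclass name, and only the key layout (action:spiderid:appid:crawlid) and the uuid value are taken from the code itself.
# Hypothetical wiring -- StopMonitor is an assumed subclass that supplies
# redis_conn, logger, _purge_crawl() and _send_to_kafka() as used above.
monitor = StopMonitor()
# The key must split on ':' into exactly 4 parts; the value is a request uuid.
monitor.handle('stop:link:testapp:crawl-42', 'uuid-0001')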
def run(self, duration, obs):
"""Run the simulation.
Parameters
----------
duration : Real
a duration for running a simulation.
A simulation is expected to be stopped at t() + duration.
observers : list of Observers, optional
observers
"""
from ecell4_base.core import TimeoutObserver
timeout = TimeoutObserver(self.__timeout)
if isinstance(obs, collections.Iterable):
obs = tuple(obs) + (timeout, )
else:
obs = (obs, timeout)
p = ProgressBar(**self.__kwargs)
p.animate(0.0)
tstart = self.__sim.t()
upto = tstart + duration
while self.__sim.t() < upto:
self.__sim.run(upto - self.__sim.t(), obs)
p.animate((self.__sim.t() - tstart) / duration, timeout.accumulation())
if self.__flush:
p.flush()
else:
print() | def function[run, parameter[self, duration, obs]]:
constant[Run the simulation.
Parameters
----------
duration : Real
a duration for running a simulation.
A simulation is expected to be stopped at t() + duration.
observers : list of Observers, optional
observers
]
from relative_module[ecell4_base.core] import module[TimeoutObserver]
variable[timeout] assign[=] call[name[TimeoutObserver], parameter[name[self].__timeout]]
if call[name[isinstance], parameter[name[obs], name[collections].Iterable]] begin[:]
variable[obs] assign[=] binary_operation[call[name[tuple], parameter[name[obs]]] + tuple[[<ast.Name object at 0x7da1b0d68880>]]]
variable[p] assign[=] call[name[ProgressBar], parameter[]]
call[name[p].animate, parameter[constant[0.0]]]
variable[tstart] assign[=] call[name[self].__sim.t, parameter[]]
variable[upto] assign[=] binary_operation[name[tstart] + name[duration]]
while compare[call[name[self].__sim.t, parameter[]] less[<] name[upto]] begin[:]
call[name[self].__sim.run, parameter[binary_operation[name[upto] - call[name[self].__sim.t, parameter[]]], name[obs]]]
call[name[p].animate, parameter[binary_operation[binary_operation[call[name[self].__sim.t, parameter[]] - name[tstart]] / name[duration]], call[name[timeout].accumulation, parameter[]]]]
if name[self].__flush begin[:]
call[name[p].flush, parameter[]] | keyword[def] identifier[run] ( identifier[self] , identifier[duration] , identifier[obs] ):
literal[string]
keyword[from] identifier[ecell4_base] . identifier[core] keyword[import] identifier[TimeoutObserver]
identifier[timeout] = identifier[TimeoutObserver] ( identifier[self] . identifier[__timeout] )
keyword[if] identifier[isinstance] ( identifier[obs] , identifier[collections] . identifier[Iterable] ):
identifier[obs] = identifier[tuple] ( identifier[obs] )+( identifier[timeout] ,)
keyword[else] :
identifier[obs] =( identifier[obs] , identifier[timeout] )
identifier[p] = identifier[ProgressBar] (** identifier[self] . identifier[__kwargs] )
identifier[p] . identifier[animate] ( literal[int] )
identifier[tstart] = identifier[self] . identifier[__sim] . identifier[t] ()
identifier[upto] = identifier[tstart] + identifier[duration]
keyword[while] identifier[self] . identifier[__sim] . identifier[t] ()< identifier[upto] :
identifier[self] . identifier[__sim] . identifier[run] ( identifier[upto] - identifier[self] . identifier[__sim] . identifier[t] (), identifier[obs] )
identifier[p] . identifier[animate] (( identifier[self] . identifier[__sim] . identifier[t] ()- identifier[tstart] )/ identifier[duration] , identifier[timeout] . identifier[accumulation] ())
keyword[if] identifier[self] . identifier[__flush] :
identifier[p] . identifier[flush] ()
keyword[else] :
identifier[print] () | def run(self, duration, obs):
"""Run the simulation.
Parameters
----------
duration : Real
a duration for running a simulation.
A simulation is expected to be stopped at t() + duration.
observers : list of Observers, optional
observers
"""
from ecell4_base.core import TimeoutObserver
timeout = TimeoutObserver(self.__timeout)
if isinstance(obs, collections.Iterable):
obs = tuple(obs) + (timeout,) # depends on [control=['if'], data=[]]
else:
obs = (obs, timeout)
p = ProgressBar(**self.__kwargs)
p.animate(0.0)
tstart = self.__sim.t()
upto = tstart + duration
while self.__sim.t() < upto:
self.__sim.run(upto - self.__sim.t(), obs)
p.animate((self.__sim.t() - tstart) / duration, timeout.accumulation()) # depends on [control=['while'], data=['upto']]
if self.__flush:
p.flush() # depends on [control=['if'], data=[]]
else:
print() |
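A short usage sketch for the progress-bar runner above; 'wrapper' stands in for an already-constructed instance of the class defining run(), and the observer class is assumed to be available in ecell4_base.core.
# Illustrative only: 'wrapper' is an instance of the class defining run()
# above, already bound to a simulator via self.__sim.
from ecell4_base.core import FixedIntervalNumberObserver  # assumed available
obs = FixedIntervalNumberObserver(1.0, ['A', 'B'])  # species names are placeholders
wrapper.run(10.0, obs)  # advance 10 time units, animating the progress bar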
def _exec_command(adb_cmd):
"""
Format adb command and execute it in shell
:param adb_cmd: list adb command to execute
:return: tuple of (0, output) if successful; otherwise a tuple of
the error code and stderr output from the caught CalledProcessError
"""
t = tempfile.TemporaryFile()
final_adb_cmd = []
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command')
try:
output = check_output(final_adb_cmd, stderr=t)
except CalledProcessError as e:
t.seek(0)
result = e.returncode, t.read()
else:
result = 0, output
print('\n' + result[1])
return result | def function[_exec_command, parameter[adb_cmd]]:
constant[
Format adb command and execute it in shell
:param adb_cmd: list adb command to execute
:return: tuple of (0, output) if successful; otherwise a tuple of
the error code and stderr output from the caught CalledProcessError
]
variable[t] assign[=] call[name[tempfile].TemporaryFile, parameter[]]
variable[final_adb_cmd] assign[=] list[[]]
for taget[name[e]] in starred[name[adb_cmd]] begin[:]
if compare[name[e] not_equal[!=] constant[]] begin[:]
call[name[final_adb_cmd].append, parameter[name[e]]]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[constant[
*** Executing ] + call[constant[ ].join, parameter[name[adb_cmd]]]] + constant[ ]] + constant[command]]]]
<ast.Try object at 0x7da18c4cfb50>
return[name[result]] | keyword[def] identifier[_exec_command] ( identifier[adb_cmd] ):
literal[string]
identifier[t] = identifier[tempfile] . identifier[TemporaryFile] ()
identifier[final_adb_cmd] =[]
keyword[for] identifier[e] keyword[in] identifier[adb_cmd] :
keyword[if] identifier[e] != literal[string] :
identifier[final_adb_cmd] . identifier[append] ( identifier[e] )
identifier[print] ( literal[string] + literal[string] . identifier[join] ( identifier[adb_cmd] )+ literal[string] + literal[string] )
keyword[try] :
identifier[output] = identifier[check_output] ( identifier[final_adb_cmd] , identifier[stderr] = identifier[t] )
keyword[except] identifier[CalledProcessError] keyword[as] identifier[e] :
identifier[t] . identifier[seek] ( literal[int] )
identifier[result] = identifier[e] . identifier[returncode] , identifier[t] . identifier[read] ()
keyword[else] :
identifier[result] = literal[int] , identifier[output]
identifier[print] ( literal[string] + identifier[result] [ literal[int] ])
keyword[return] identifier[result] | def _exec_command(adb_cmd):
"""
Format adb command and execute it in shell
:param adb_cmd: list adb command to execute
:return: tuple of (0, output) if successful; otherwise a tuple of
the error code and stderr output from the caught CalledProcessError
"""
t = tempfile.TemporaryFile()
final_adb_cmd = []
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't # depends on [control=['if'], data=['e']] # depends on [control=['for'], data=['e']]
# contain extra spaces
print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command')
try:
output = check_output(final_adb_cmd, stderr=t) # depends on [control=['try'], data=[]]
except CalledProcessError as e:
t.seek(0)
result = (e.returncode, t.read()) # depends on [control=['except'], data=['e']]
else:
result = (0, output)
print('\n' + result[1])
return result |
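A minimal usage sketch for the helper above; it assumes adb is installed and on PATH, and relies only on the (code, output) tuple the function actually returns.
# Empty-string items are filtered out by the helper itself.
code, out = _exec_command(['adb', 'devices', ''])
if code != 0:
    print('adb failed with exit code %s' % code)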
def _set_mpls_reopt_lsp(self, v, load=False):
"""
Setter method for mpls_reopt_lsp, mapped from YANG variable /brocade_mpls_rpc/mpls_reopt_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_reopt_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_reopt_lsp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mpls_reopt_lsp.mpls_reopt_lsp, is_leaf=True, yang_name="mpls-reopt-lsp", rest_name="mpls-reopt-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsReoptimize'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_reopt_lsp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=mpls_reopt_lsp.mpls_reopt_lsp, is_leaf=True, yang_name="mpls-reopt-lsp", rest_name="mpls-reopt-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsReoptimize'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__mpls_reopt_lsp = t
if hasattr(self, '_set'):
self._set() | def function[_set_mpls_reopt_lsp, parameter[self, v, load]]:
constant[
Setter method for mpls_reopt_lsp, mapped from YANG variable /brocade_mpls_rpc/mpls_reopt_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_reopt_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_reopt_lsp() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18bccb370>
name[self].__mpls_reopt_lsp assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_mpls_reopt_lsp] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[mpls_reopt_lsp] . identifier[mpls_reopt_lsp] , identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[False] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__mpls_reopt_lsp] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_mpls_reopt_lsp(self, v, load=False):
"""
Setter method for mpls_reopt_lsp, mapped from YANG variable /brocade_mpls_rpc/mpls_reopt_lsp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_reopt_lsp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_reopt_lsp() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=mpls_reopt_lsp.mpls_reopt_lsp, is_leaf=True, yang_name='mpls-reopt-lsp', rest_name='mpls-reopt-lsp', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsReoptimize'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'mpls_reopt_lsp must be of a type compatible with rpc', 'defined-type': 'rpc', 'generated-type': 'YANGDynClass(base=mpls_reopt_lsp.mpls_reopt_lsp, is_leaf=True, yang_name="mpls-reopt-lsp", rest_name="mpls-reopt-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u\'tailf-common\': {u\'hidden\': u\'rpccmd\', u\'actionpoint\': u\'mplsReoptimize\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'rpc\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__mpls_reopt_lsp = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def extend(self, values):
"""Extend the array, appending the given values."""
self.database.run_script(
'array_extend',
keys=[self.key],
args=values) | def function[extend, parameter[self, values]]:
constant[Extend the array, appending the given values.]
call[name[self].database.run_script, parameter[constant[array_extend]]] | keyword[def] identifier[extend] ( identifier[self] , identifier[values] ):
literal[string]
identifier[self] . identifier[database] . identifier[run_script] (
literal[string] ,
identifier[keys] =[ identifier[self] . identifier[key] ],
identifier[args] = identifier[values] ) | def extend(self, values):
"""Extend the array, appending the given values."""
self.database.run_script('array_extend', keys=[self.key], args=values) |
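A hedged usage sketch; the Array constructor shown is an assumption, since only the extend() method and its 'array_extend' script call are visible above.
# Hypothetical container bound to a Redis-backed database and a key.
arr = Array(database=db, key='queue:pending')  # assumed constructor
arr.extend(['job-1', 'job-2'])  # appends both values via the server-side script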
def upstart(
state, host, name,
running=True, restarted=False, reloaded=False,
command=None, enabled=None,
):
'''
Manage the state of upstart managed services.
+ name: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<name> <command>``
+ enabled: whether this service should be enabled/disabled on boot
Enabling/disabling services:
Upstart jobs define runlevels in their config files - as such there is no way to
edit/list these without fiddling with the config. So pyinfra simply manages the
existence of a ``/etc/init/<service>.override`` file, and sets its content to
"manual" to disable automatic start of services.
'''
yield _handle_service_control(
name, host.fact.upstart_status,
'initctl {1} {0}',
running, restarted, reloaded, command,
)
# Upstart jobs are set up with runlevels etc. in their config files, so here we just check
# there's no override file.
if enabled is True:
yield files.file(
state, host,
'/etc/init/{0}.override'.format(name),
present=False,
)
# Set the override file to "manual" to disable automatic start
elif enabled is False:
yield 'echo "manual" > /etc/init/{0}.override'.format(name) | def function[upstart, parameter[state, host, name, running, restarted, reloaded, command, enabled]]:
constant[
Manage the state of upstart managed services.
+ name: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<name> <command>``
+ enabled: whether this service should be enabled/disabled on boot
Enabling/disabling services:
Upstart jobs define runlevels in their config files - as such there is no way to
edit/list these without fiddling with the config. So pyinfra simply manages the
existence of a ``/etc/init/<service>.override`` file, and sets its content to
"manual" to disable automatic start of services.
]
<ast.Yield object at 0x7da18bc727a0>
if compare[name[enabled] is constant[True]] begin[:]
<ast.Yield object at 0x7da18bc73a60> | keyword[def] identifier[upstart] (
identifier[state] , identifier[host] , identifier[name] ,
identifier[running] = keyword[True] , identifier[restarted] = keyword[False] , identifier[reloaded] = keyword[False] ,
identifier[command] = keyword[None] , identifier[enabled] = keyword[None] ,
):
literal[string]
keyword[yield] identifier[_handle_service_control] (
identifier[name] , identifier[host] . identifier[fact] . identifier[upstart_status] ,
literal[string] ,
identifier[running] , identifier[restarted] , identifier[reloaded] , identifier[command] ,
)
keyword[if] identifier[enabled] keyword[is] keyword[True] :
keyword[yield] identifier[files] . identifier[file] (
identifier[state] , identifier[host] ,
literal[string] . identifier[format] ( identifier[name] ),
identifier[present] = keyword[False] ,
)
keyword[elif] identifier[enabled] keyword[is] keyword[False] :
keyword[yield] literal[string] . identifier[format] ( identifier[name] ) | def upstart(state, host, name, running=True, restarted=False, reloaded=False, command=None, enabled=None):
"""
Manage the state of upstart managed services.
+ name: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<name> <command>``
+ enabled: whether this service should be enabled/disabled on boot
Enabling/disabling services:
Upstart jobs define runlevels in their config files - as such there is no way to
edit/list these without fiddling with the config. So pyinfra simply manages the
existence of a ``/etc/init/<service>.override`` file, and sets its content to
"manual" to disable automatic start of services.
"""
yield _handle_service_control(name, host.fact.upstart_status, 'initctl {1} {0}', running, restarted, reloaded, command)
# Upstart jobs are set up with runlevels etc. in their config files, so here we just check
# there's no override file.
if enabled is True:
yield files.file(state, host, '/etc/init/{0}.override'.format(name), present=False) # depends on [control=['if'], data=[]]
# Set the override file to "manual" to disable automatic start
elif enabled is False:
yield 'echo "manual" > /etc/init/{0}.override'.format(name) # depends on [control=['if'], data=[]] |
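As a usage sketch, the operation above is meant to be driven by pyinfra, which supplies state and host; the invocation below is schematic rather than exact.
# Schematic pyinfra-style call: ensure cron is running, restart it once,
# and disable automatic start on boot (writes the .override file).
for op in upstart(state, host, 'cron', running=True,
                  restarted=True, enabled=False):
    pass  # pyinfra normally consumes these yielded commands/operations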
def _render_rst(self): # pragma: no cover
"""Render lines of reStructuredText for items yielded by
:meth:`~doctor.docs.base.BaseHarness.iter_annotations`.
"""
# Create a mapping of headers to annotations. We want to group
# all annotations by a header, but they could be in multiple handlers
# so we create a map of them here with the heading as the key and
# the list of associated annotations as a list. This is so we can
# sort them alphabetically to make reading the api docs easier.
heading_to_annotations_map = defaultdict(list)
for heading, route, handler, annotations in (
self.harness.iter_annotations()):
# Set the route and handler as attributes so we can retrieve them
# when we loop through them all below.
for annotation in annotations:
annotation.route = route
annotation.handler = handler
heading_to_annotations_map[heading].append(annotation)
headings = list(heading_to_annotations_map.keys())
headings.sort()
previous_heading = None
for heading in headings:
annotations = heading_to_annotations_map.get(heading)
# Sort all the annotations by title.
annotations.sort(key=lambda a: a.title)
# Only emit a new heading if the resource has changed. This
# esnures that documented endpoints for the same resource all
# end up under a single heading.
if previous_heading != heading:
previous_heading = heading
yield HEADING_TOKEN + heading
for annotation in annotations:
route = annotation.route
normalized_route = normalize_route(route)
handler = annotation.handler
# Adds a title for the endpoint.
if annotation.title is not None:
yield annotation.title
yield '#' * len(annotation.title)
docstring = get_description_lines(getattr(annotation.logic,
'__doc__', None))
# Documents the logic function associated with the annotation.
docstring.append(':Logic Func: :func:`~{}.{}`'.format(
annotation.logic.__module__, annotation.logic.__name__))
field = '<json'
if annotation.http_method in ('DELETE', 'GET'):
field = 'query'
docstring.extend(get_json_lines(
annotation, field=field, route=normalized_route,
request=True)
)
# Document any request headers.
defined_headers = list(self.harness._get_headers(
str(route), annotation).keys())
defined_headers.sort()
for header in defined_headers:
definition = self.harness.header_definitions.get(
header, '').strip()
docstring.append(':reqheader {}: {}'.format(
header, definition))
# Document response if a type was defined.
if annotation.return_annotation != Parameter.empty:
docstring.extend(get_json_lines(
annotation, field='>json', route=normalized_route))
docstring.extend(self._make_example(route, handler, annotation))
for line in http_directive(annotation.http_method,
normalized_route, docstring):
yield line
# Document resource objects.
for line in get_resource_object_doc_lines():
yield line | def function[_render_rst, parameter[self]]:
constant[Render lines of reStructuredText for items yielded by
:meth:`~doctor.docs.base.BaseHarness.iter_annotations`.
]
variable[heading_to_annotations_map] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da20c7c9a50>, <ast.Name object at 0x7da20c7c9480>, <ast.Name object at 0x7da20c7caa10>, <ast.Name object at 0x7da20c7cb9a0>]]] in starred[call[name[self].harness.iter_annotations, parameter[]]] begin[:]
for taget[name[annotation]] in starred[name[annotations]] begin[:]
name[annotation].route assign[=] name[route]
name[annotation].handler assign[=] name[handler]
call[call[name[heading_to_annotations_map]][name[heading]].append, parameter[name[annotation]]]
variable[headings] assign[=] call[name[list], parameter[call[name[heading_to_annotations_map].keys, parameter[]]]]
call[name[headings].sort, parameter[]]
variable[previous_heading] assign[=] constant[None]
for taget[name[heading]] in starred[name[headings]] begin[:]
variable[annotations] assign[=] call[name[heading_to_annotations_map].get, parameter[name[heading]]]
call[name[annotations].sort, parameter[]]
if compare[name[previous_heading] not_equal[!=] name[heading]] begin[:]
variable[previous_heading] assign[=] name[heading]
<ast.Yield object at 0x7da20e954580>
for taget[name[annotation]] in starred[name[annotations]] begin[:]
variable[route] assign[=] name[annotation].route
variable[normalized_route] assign[=] call[name[normalize_route], parameter[name[route]]]
variable[handler] assign[=] name[annotation].handler
if compare[name[annotation].title is_not constant[None]] begin[:]
<ast.Yield object at 0x7da20e956a70>
<ast.Yield object at 0x7da20e9569e0>
variable[docstring] assign[=] call[name[get_description_lines], parameter[call[name[getattr], parameter[name[annotation].logic, constant[__doc__], constant[None]]]]]
call[name[docstring].append, parameter[call[constant[:Logic Func: :func:`~{}.{}`].format, parameter[name[annotation].logic.__module__, name[annotation].logic.__name__]]]]
variable[field] assign[=] constant[<json]
if compare[name[annotation].http_method in tuple[[<ast.Constant object at 0x7da20c6abd00>, <ast.Constant object at 0x7da20c6a88e0>]]] begin[:]
variable[field] assign[=] constant[query]
call[name[docstring].extend, parameter[call[name[get_json_lines], parameter[name[annotation]]]]]
variable[defined_headers] assign[=] call[name[list], parameter[call[call[name[self].harness._get_headers, parameter[call[name[str], parameter[name[route]]], name[annotation]]].keys, parameter[]]]]
call[name[defined_headers].sort, parameter[]]
for taget[name[header]] in starred[name[defined_headers]] begin[:]
variable[definition] assign[=] call[call[name[self].harness.header_definitions.get, parameter[name[header], constant[]]].strip, parameter[]]
call[name[docstring].append, parameter[call[constant[:reqheader {}: {}].format, parameter[name[header], name[definition]]]]]
if compare[name[annotation].return_annotation not_equal[!=] name[Parameter].empty] begin[:]
call[name[docstring].extend, parameter[call[name[get_json_lines], parameter[name[annotation]]]]]
call[name[docstring].extend, parameter[call[name[self]._make_example, parameter[name[route], name[handler], name[annotation]]]]]
for taget[name[line]] in starred[call[name[http_directive], parameter[name[annotation].http_method, name[normalized_route], name[docstring]]]] begin[:]
<ast.Yield object at 0x7da20c6a83d0>
for taget[name[line]] in starred[call[name[get_resource_object_doc_lines], parameter[]]] begin[:]
<ast.Yield object at 0x7da20c6aba00> | keyword[def] identifier[_render_rst] ( identifier[self] ):
literal[string]
identifier[heading_to_annotations_map] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[heading] , identifier[route] , identifier[handler] , identifier[annotations] keyword[in] (
identifier[self] . identifier[harness] . identifier[iter_annotations] ()):
keyword[for] identifier[annotation] keyword[in] identifier[annotations] :
identifier[annotation] . identifier[route] = identifier[route]
identifier[annotation] . identifier[handler] = identifier[handler]
identifier[heading_to_annotations_map] [ identifier[heading] ]. identifier[append] ( identifier[annotation] )
identifier[headings] = identifier[list] ( identifier[heading_to_annotations_map] . identifier[keys] ())
identifier[headings] . identifier[sort] ()
identifier[previous_heading] = keyword[None]
keyword[for] identifier[heading] keyword[in] identifier[headings] :
identifier[annotations] = identifier[heading_to_annotations_map] . identifier[get] ( identifier[heading] )
identifier[annotations] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[a] : identifier[a] . identifier[title] )
keyword[if] identifier[previous_heading] != identifier[heading] :
identifier[previous_heading] = identifier[heading]
keyword[yield] identifier[HEADING_TOKEN] + identifier[heading]
keyword[for] identifier[annotation] keyword[in] identifier[annotations] :
identifier[route] = identifier[annotation] . identifier[route]
identifier[normalized_route] = identifier[normalize_route] ( identifier[route] )
identifier[handler] = identifier[annotation] . identifier[handler]
keyword[if] identifier[annotation] . identifier[title] keyword[is] keyword[not] keyword[None] :
keyword[yield] identifier[annotation] . identifier[title]
keyword[yield] literal[string] * identifier[len] ( identifier[annotation] . identifier[title] )
identifier[docstring] = identifier[get_description_lines] ( identifier[getattr] ( identifier[annotation] . identifier[logic] ,
literal[string] , keyword[None] ))
identifier[docstring] . identifier[append] ( literal[string] . identifier[format] (
identifier[annotation] . identifier[logic] . identifier[__module__] , identifier[annotation] . identifier[logic] . identifier[__name__] ))
identifier[field] = literal[string]
keyword[if] identifier[annotation] . identifier[http_method] keyword[in] ( literal[string] , literal[string] ):
identifier[field] = literal[string]
identifier[docstring] . identifier[extend] ( identifier[get_json_lines] (
identifier[annotation] , identifier[field] = identifier[field] , identifier[route] = identifier[normalized_route] ,
identifier[request] = keyword[True] )
)
identifier[defined_headers] = identifier[list] ( identifier[self] . identifier[harness] . identifier[_get_headers] (
identifier[str] ( identifier[route] ), identifier[annotation] ). identifier[keys] ())
identifier[defined_headers] . identifier[sort] ()
keyword[for] identifier[header] keyword[in] identifier[defined_headers] :
identifier[definition] = identifier[self] . identifier[harness] . identifier[header_definitions] . identifier[get] (
identifier[header] , literal[string] ). identifier[strip] ()
identifier[docstring] . identifier[append] ( literal[string] . identifier[format] (
identifier[header] , identifier[definition] ))
keyword[if] identifier[annotation] . identifier[return_annotation] != identifier[Parameter] . identifier[empty] :
identifier[docstring] . identifier[extend] ( identifier[get_json_lines] (
identifier[annotation] , identifier[field] = literal[string] , identifier[route] = identifier[normalized_route] ))
identifier[docstring] . identifier[extend] ( identifier[self] . identifier[_make_example] ( identifier[route] , identifier[handler] , identifier[annotation] ))
keyword[for] identifier[line] keyword[in] identifier[http_directive] ( identifier[annotation] . identifier[http_method] ,
identifier[normalized_route] , identifier[docstring] ):
keyword[yield] identifier[line]
keyword[for] identifier[line] keyword[in] identifier[get_resource_object_doc_lines] ():
keyword[yield] identifier[line] | def _render_rst(self): # pragma: no cover
'Render lines of reStructuredText for items yielded by\n :meth:`~doctor.docs.base.BaseHarness.iter_annotations`.\n '
# Create a mapping of headers to annotations. We want to group
# all annotations by a header, but they could be in multiple handlers
# so we create a map of them here with the heading as the key and
# the list of associated annotations as a list. This is so we can
# sort them alphabetically to make reading the api docs easier.
heading_to_annotations_map = defaultdict(list)
for (heading, route, handler, annotations) in self.harness.iter_annotations():
# Set the route and handler as attributes so we can retrieve them
# when we loop through them all below.
for annotation in annotations:
annotation.route = route
annotation.handler = handler
heading_to_annotations_map[heading].append(annotation) # depends on [control=['for'], data=['annotation']] # depends on [control=['for'], data=[]]
headings = list(heading_to_annotations_map.keys())
headings.sort()
previous_heading = None
for heading in headings:
annotations = heading_to_annotations_map.get(heading)
# Sort all the annotations by title.
annotations.sort(key=lambda a: a.title)
# Only emit a new heading if the resource has changed. This
# esnures that documented endpoints for the same resource all
# end up under a single heading.
if previous_heading != heading:
previous_heading = heading
yield (HEADING_TOKEN + heading) # depends on [control=['if'], data=['previous_heading', 'heading']]
for annotation in annotations:
route = annotation.route
normalized_route = normalize_route(route)
handler = annotation.handler
# Adds a title for the endpoint.
if annotation.title is not None:
yield annotation.title
yield ('#' * len(annotation.title)) # depends on [control=['if'], data=[]]
docstring = get_description_lines(getattr(annotation.logic, '__doc__', None))
# Documents the logic function associated with the annotation.
docstring.append(':Logic Func: :func:`~{}.{}`'.format(annotation.logic.__module__, annotation.logic.__name__))
field = '<json'
if annotation.http_method in ('DELETE', 'GET'):
field = 'query' # depends on [control=['if'], data=[]]
docstring.extend(get_json_lines(annotation, field=field, route=normalized_route, request=True))
# Document any request headers.
defined_headers = list(self.harness._get_headers(str(route), annotation).keys())
defined_headers.sort()
for header in defined_headers:
definition = self.harness.header_definitions.get(header, '').strip()
docstring.append(':reqheader {}: {}'.format(header, definition)) # depends on [control=['for'], data=['header']]
# Document response if a type was defined.
if annotation.return_annotation != Parameter.empty:
docstring.extend(get_json_lines(annotation, field='>json', route=normalized_route)) # depends on [control=['if'], data=[]]
docstring.extend(self._make_example(route, handler, annotation))
for line in http_directive(annotation.http_method, normalized_route, docstring):
yield line # depends on [control=['for'], data=['line']] # depends on [control=['for'], data=['annotation']] # depends on [control=['for'], data=['heading']]
# Document resource objects.
for line in get_resource_object_doc_lines():
yield line # depends on [control=['for'], data=['line']] |
def _iter_info(self, niter, level=logging.INFO):
"""
Log iteration number and mismatch
Parameters
----------
niter
iteration number
level
logging level
Returns
-------
None
"""
max_mis = self.iter_mis[niter - 1]
msg = ' Iter {:<d}. max mismatch = {:8.7f}'.format(niter, max_mis)
logger.info(msg) | def function[_iter_info, parameter[self, niter, level]]:
constant[
Log iteration number and mismatch
Parameters
----------
niter
iteration number
level
logging level
Returns
-------
None
]
variable[max_mis] assign[=] call[name[self].iter_mis][binary_operation[name[niter] - constant[1]]]
variable[msg] assign[=] call[constant[ Iter {:<d}. max mismatch = {:8.7f}].format, parameter[name[niter], name[max_mis]]]
call[name[logger].info, parameter[name[msg]]] | keyword[def] identifier[_iter_info] ( identifier[self] , identifier[niter] , identifier[level] = identifier[logging] . identifier[INFO] ):
literal[string]
identifier[max_mis] = identifier[self] . identifier[iter_mis] [ identifier[niter] - literal[int] ]
identifier[msg] = literal[string] . identifier[format] ( identifier[niter] , identifier[max_mis] )
identifier[logger] . identifier[info] ( identifier[msg] ) | def _iter_info(self, niter, level=logging.INFO):
"""
Log iteration number and mismatch
Parameters
----------
niter
iteration number
level
logging level
Returns
-------
None
"""
max_mis = self.iter_mis[niter - 1]
msg = ' Iter {:<d}. max mismatch = {:8.7f}'.format(niter, max_mis)
logger.info(msg) |
def save_all(self):
"""Save all opened files.
Iterate through self.data and call save() on any modified files.
"""
for index in range(self.get_stack_count()):
if self.data[index].editor.document().isModified():
self.save(index) | def function[save_all, parameter[self]]:
constant[Save all opened files.
Iterate through self.data and call save() on any modified files.
]
for taget[name[index]] in starred[call[name[range], parameter[call[name[self].get_stack_count, parameter[]]]]] begin[:]
if call[call[call[name[self].data][name[index]].editor.document, parameter[]].isModified, parameter[]] begin[:]
call[name[self].save, parameter[name[index]]] | keyword[def] identifier[save_all] ( identifier[self] ):
literal[string]
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[self] . identifier[get_stack_count] ()):
keyword[if] identifier[self] . identifier[data] [ identifier[index] ]. identifier[editor] . identifier[document] (). identifier[isModified] ():
identifier[self] . identifier[save] ( identifier[index] ) | def save_all(self):
"""Save all opened files.
Iterate through self.data and call save() on any modified files.
"""
for index in range(self.get_stack_count()):
if self.data[index].editor.document().isModified():
self.save(index) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']] |
def _process_out_of_bounds(self, value, start, end):
"Clips out of bounds values"
if isinstance(value, np.datetime64):
v = dt64_to_dt(value)
if isinstance(start, (int, float)):
start = convert_timestamp(start)
if isinstance(end, (int, float)):
end = convert_timestamp(end)
s, e = start, end
if isinstance(s, np.datetime64):
s = dt64_to_dt(s)
if isinstance(e, np.datetime64):
e = dt64_to_dt(e)
else:
v, s, e = value, start, end
if v < s:
value = start
elif v > e:
value = end
return value | def function[_process_out_of_bounds, parameter[self, value, start, end]]:
constant[Clips out-of-bounds values]
if call[name[isinstance], parameter[name[value], name[np].datetime64]] begin[:]
variable[v] assign[=] call[name[dt64_to_dt], parameter[name[value]]]
if call[name[isinstance], parameter[name[start], tuple[[<ast.Name object at 0x7da1b1c65930>, <ast.Name object at 0x7da1b1c67460>]]]] begin[:]
variable[start] assign[=] call[name[convert_timestamp], parameter[name[start]]]
if call[name[isinstance], parameter[name[end], tuple[[<ast.Name object at 0x7da1b1c675e0>, <ast.Name object at 0x7da1b1c67790>]]]] begin[:]
variable[end] assign[=] call[name[convert_timestamp], parameter[name[end]]]
<ast.Tuple object at 0x7da1b1c64190> assign[=] tuple[[<ast.Name object at 0x7da1b1c65e70>, <ast.Name object at 0x7da1b1c65ba0>]]
if call[name[isinstance], parameter[name[s], name[np].datetime64]] begin[:]
variable[s] assign[=] call[name[dt64_to_dt], parameter[name[s]]]
if call[name[isinstance], parameter[name[e], name[np].datetime64]] begin[:]
variable[e] assign[=] call[name[dt64_to_dt], parameter[name[e]]]
if compare[name[v] less[<] name[s]] begin[:]
variable[value] assign[=] name[start]
return[name[value]] | keyword[def] identifier[_process_out_of_bounds] ( identifier[self] , identifier[value] , identifier[start] , identifier[end] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[np] . identifier[datetime64] ):
identifier[v] = identifier[dt64_to_dt] ( identifier[value] )
keyword[if] identifier[isinstance] ( identifier[start] ,( identifier[int] , identifier[float] )):
identifier[start] = identifier[convert_timestamp] ( identifier[start] )
keyword[if] identifier[isinstance] ( identifier[end] ,( identifier[int] , identifier[float] )):
identifier[end] = identifier[convert_timestamp] ( identifier[end] )
identifier[s] , identifier[e] = identifier[start] , identifier[end]
keyword[if] identifier[isinstance] ( identifier[s] , identifier[np] . identifier[datetime64] ):
identifier[s] = identifier[dt64_to_dt] ( identifier[s] )
keyword[if] identifier[isinstance] ( identifier[e] , identifier[np] . identifier[datetime64] ):
identifier[e] = identifier[dt64_to_dt] ( identifier[e] )
keyword[else] :
identifier[v] , identifier[s] , identifier[e] = identifier[value] , identifier[start] , identifier[end]
keyword[if] identifier[v] < identifier[s] :
identifier[value] = identifier[start]
keyword[elif] identifier[v] > identifier[e] :
identifier[value] = identifier[end]
keyword[return] identifier[value] | def _process_out_of_bounds(self, value, start, end):
"""Clips out of bounds values"""
if isinstance(value, np.datetime64):
v = dt64_to_dt(value)
if isinstance(start, (int, float)):
start = convert_timestamp(start) # depends on [control=['if'], data=[]]
if isinstance(end, (int, float)):
end = convert_timestamp(end) # depends on [control=['if'], data=[]]
(s, e) = (start, end)
if isinstance(s, np.datetime64):
s = dt64_to_dt(s) # depends on [control=['if'], data=[]]
if isinstance(e, np.datetime64):
e = dt64_to_dt(e) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
(v, s, e) = (value, start, end)
if v < s:
value = start # depends on [control=['if'], data=[]]
elif v > e:
value = end # depends on [control=['if'], data=[]]
return value |
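The clamping above is easiest to see with plain numbers; a sketch assuming 'widget' is an instance of the class defining the method.
# Values outside [start, end] are clamped to the nearest bound.
widget._process_out_of_bounds(5, 0, 10)    # -> 5   (already in range)
widget._process_out_of_bounds(-3, 0, 10)   # -> 0   (clipped to start)
widget._process_out_of_bounds(42, 0, 10)   # -> 10  (clipped to end)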
def start(self, timeout=None):
"""
Start OpenVPN and block until the connection is opened or there is
an error
:param timeout: time in seconds to wait for process to start
:return:
"""
if not timeout:
timeout = self.timeout
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break
if self.started:
logging.info("OpenVPN connected")
# append instance to connected list
OpenVPN.connected_instances.append(self)
else:
logging.warn("OpenVPN not started")
for line in self.notifications.split('\n'):
logging.warn("OpenVPN output:\t\t%s" % line) | def function[start, parameter[self, timeout]]:
constant[
Start OpenVPN and block until the connection is opened or there is
an error
:param timeout: time in seconds to wait for process to start
:return:
]
if <ast.UnaryOp object at 0x7da1b2844eb0> begin[:]
variable[timeout] assign[=] name[self].timeout
call[name[self].thread.start, parameter[]]
variable[start_time] assign[=] call[name[time].time, parameter[]]
while compare[binary_operation[name[start_time] + name[timeout]] greater[>] call[name[time].time, parameter[]]] begin[:]
call[name[self].thread.join, parameter[constant[1]]]
if <ast.BoolOp object at 0x7da1b2818760> begin[:]
break
if name[self].started begin[:]
call[name[logging].info, parameter[constant[OpenVPN connected]]]
call[name[OpenVPN].connected_instances.append, parameter[name[self]]] | keyword[def] identifier[start] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[timeout] :
identifier[timeout] = identifier[self] . identifier[timeout]
identifier[self] . identifier[thread] . identifier[start] ()
identifier[start_time] = identifier[time] . identifier[time] ()
keyword[while] identifier[start_time] + identifier[timeout] > identifier[time] . identifier[time] ():
identifier[self] . identifier[thread] . identifier[join] ( literal[int] )
keyword[if] identifier[self] . identifier[error] keyword[or] identifier[self] . identifier[started] :
keyword[break]
keyword[if] identifier[self] . identifier[started] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[OpenVPN] . identifier[connected_instances] . identifier[append] ( identifier[self] )
keyword[else] :
identifier[logging] . identifier[warn] ( literal[string] )
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[notifications] . identifier[split] ( literal[string] ):
identifier[logging] . identifier[warn] ( literal[string] % identifier[line] ) | def start(self, timeout=None):
"""
Start OpenVPN and block until the connection is opened or there is
an error
:param timeout: time in seconds to wait for process to start
:return:
"""
if not timeout:
timeout = self.timeout # depends on [control=['if'], data=[]]
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if self.started:
logging.info('OpenVPN connected')
# append instance to connected list
OpenVPN.connected_instances.append(self) # depends on [control=['if'], data=[]]
else:
logging.warn('OpenVPN not started')
for line in self.notifications.split('\n'):
logging.warn('OpenVPN output:\t\t%s' % line) # depends on [control=['for'], data=['line']] |
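A usage sketch for the blocking start above; the constructor arguments are assumptions, since only timeout, thread, the started/error flags, and connected_instances are visible here.
# Hypothetical construction; start() blocks until connected, error, or timeout.
vpn = OpenVPN(config_file='client.ovpn', timeout=60)  # assumed signature
vpn.start()
if vpn in OpenVPN.connected_instances:
    print('tunnel established')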
def avail_locations(conn=None, call=None):
'''
List available locations for Azure
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
if not conn:
conn = get_conn()
ret = {}
locations = conn.list_locations()
for location in locations:
ret[location.name] = {
'name': location.name,
'display_name': location.display_name,
'available_services': location.available_services,
}
return ret | def function[avail_locations, parameter[conn, call]]:
constant[
List available locations for Azure
]
if compare[name[call] equal[==] constant[action]] begin[:]
<ast.Raise object at 0x7da20c6aada0>
if <ast.UnaryOp object at 0x7da20c6a8430> begin[:]
variable[conn] assign[=] call[name[get_conn], parameter[]]
variable[ret] assign[=] dictionary[[], []]
variable[locations] assign[=] call[name[conn].list_locations, parameter[]]
for taget[name[location]] in starred[name[locations]] begin[:]
call[name[ret]][name[location].name] assign[=] dictionary[[<ast.Constant object at 0x7da20c6ab6a0>, <ast.Constant object at 0x7da20c6a8250>, <ast.Constant object at 0x7da20c6a91b0>], [<ast.Attribute object at 0x7da20c6abf70>, <ast.Attribute object at 0x7da20c6aaef0>, <ast.Attribute object at 0x7da20c6a85e0>]]
return[name[ret]] | keyword[def] identifier[avail_locations] ( identifier[conn] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] == literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
literal[string]
)
keyword[if] keyword[not] identifier[conn] :
identifier[conn] = identifier[get_conn] ()
identifier[ret] ={}
identifier[locations] = identifier[conn] . identifier[list_locations] ()
keyword[for] identifier[location] keyword[in] identifier[locations] :
identifier[ret] [ identifier[location] . identifier[name] ]={
literal[string] : identifier[location] . identifier[name] ,
literal[string] : identifier[location] . identifier[display_name] ,
literal[string] : identifier[location] . identifier[available_services] ,
}
keyword[return] identifier[ret] | def avail_locations(conn=None, call=None):
"""
List available locations for Azure
"""
if call == 'action':
raise SaltCloudSystemExit('The avail_locations function must be called with -f or --function, or with the --list-locations option') # depends on [control=['if'], data=[]]
if not conn:
conn = get_conn() # depends on [control=['if'], data=[]]
ret = {}
locations = conn.list_locations()
for location in locations:
ret[location.name] = {'name': location.name, 'display_name': location.display_name, 'available_services': location.available_services} # depends on [control=['for'], data=['location']]
return ret |
def create_pipeline(name, unique_id, description='', region=None, key=None, keyid=None,
profile=None):
'''
Create a new, empty pipeline. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
'''
client = _get_client(region, key, keyid, profile)
r = {}
try:
response = client.create_pipeline(
name=name,
uniqueId=unique_id,
description=description,
)
r['result'] = response['pipelineId']
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = six.text_type(e)
return r | def function[create_pipeline, parameter[name, unique_id, description, region, key, keyid, profile]]:
constant[
Create a new, empty pipeline. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
]
variable[client] assign[=] call[name[_get_client], parameter[name[region], name[key], name[keyid], name[profile]]]
variable[r] assign[=] dictionary[[], []]
<ast.Try object at 0x7da204622860>
return[name[r]] | keyword[def] identifier[create_pipeline] ( identifier[name] , identifier[unique_id] , identifier[description] = literal[string] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] ,
identifier[profile] = keyword[None] ):
literal[string]
identifier[client] = identifier[_get_client] ( identifier[region] , identifier[key] , identifier[keyid] , identifier[profile] )
identifier[r] ={}
keyword[try] :
identifier[response] = identifier[client] . identifier[create_pipeline] (
identifier[name] = identifier[name] ,
identifier[uniqueId] = identifier[unique_id] ,
identifier[description] = identifier[description] ,
)
identifier[r] [ literal[string] ]= identifier[response] [ literal[string] ]
keyword[except] ( identifier[botocore] . identifier[exceptions] . identifier[BotoCoreError] , identifier[botocore] . identifier[exceptions] . identifier[ClientError] ) keyword[as] identifier[e] :
identifier[r] [ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[e] )
keyword[return] identifier[r] | def create_pipeline(name, unique_id, description='', region=None, key=None, keyid=None, profile=None):
"""
Create a new, empty pipeline. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
"""
client = _get_client(region, key, keyid, profile)
r = {}
try:
response = client.create_pipeline(name=name, uniqueId=unique_id, description=description)
r['result'] = response['pipelineId'] # depends on [control=['try'], data=[]]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = six.text_type(e) # depends on [control=['except'], data=['e']]
return r |
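Complementing the CLI example in the docstring, a direct Python call might look like this; the region value is a placeholder.
# Returns {'result': <pipelineId>} on success or {'error': <message>}.
r = create_pipeline('my-pipeline', 'my-unique-id',
                    description='nightly ETL', region='us-east-1')
if 'error' in r:
    print('create failed: %s' % r['error'])
else:
    print('pipeline id: %s' % r['result'])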
def complex_files(script='d[1]+1j*d[2]', escript=None, paths=None, **kwargs):
"""
Loads files and plots complex data in the real-imaginary plane.
Parameters
----------
script='d[1]+1j*d[2]'
Complex-valued script for data array.
escript=None
Complex-valued script for error bars
paths=None
List of paths to open. None means use a dialog
See spinmob.plot.complex.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
ds = _data.load_multiple(paths=paths)
if len(ds) == 0: return
if 'title' not in kwargs: kwargs['title'] = _os.path.split(ds[0].path)[0]
return complex_databoxes(ds, script=script, **kwargs) | def function[complex_files, parameter[script, escript, paths]]:
constant[
Loads files and plots complex data in the real-imaginary plane.
Parameters
----------
script='d[1]+1j*d[2]'
Complex-valued script for data array.
escript=None
Complex-valued script for error bars
paths=None
List of paths to open. None means use a dialog
See spinmob.plot.complex.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
]
variable[ds] assign[=] call[name[_data].load_multiple, parameter[]]
if compare[call[name[len], parameter[name[ds]]] equal[==] constant[0]] begin[:]
return[None]
if compare[constant[title] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[title]] assign[=] call[call[name[_os].path.split, parameter[call[name[ds]][constant[0]].path]]][constant[0]]
return[call[name[complex_databoxes], parameter[name[ds]]]] | keyword[def] identifier[complex_files] ( identifier[script] = literal[string] , identifier[escript] = keyword[None] , identifier[paths] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ds] = identifier[_data] . identifier[load_multiple] ( identifier[paths] = identifier[paths] )
keyword[if] identifier[len] ( identifier[ds] )== literal[int] : keyword[return]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[_os] . identifier[path] . identifier[split] ( identifier[ds] [ literal[int] ]. identifier[path] )[ literal[int] ]
keyword[return] identifier[complex_databoxes] ( identifier[ds] , identifier[script] = identifier[script] ,** identifier[kwargs] ) | def complex_files(script='d[1]+1j*d[2]', escript=None, paths=None, **kwargs):
"""
Loads files and plots complex data in the real-imaginary plane.
Parameters
----------
script='d[1]+1j*d[2]'
Complex-valued script for data array.
escript=None
Complex-valued script for error bars
paths=None
List of paths to open. None means use a dialog
See spinmob.plot.complex.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
ds = _data.load_multiple(paths=paths)
if len(ds) == 0:
return # depends on [control=['if'], data=[]]
if 'title' not in kwargs:
kwargs['title'] = _os.path.split(ds[0].path)[0] # depends on [control=['if'], data=['kwargs']]
return complex_databoxes(ds, script=script, **kwargs) |
def es_version(self, url):
"""Get Elasticsearch version.
Get the version of Elasticsearch. This is useful because
Elasticsearch and Kibiter are paired (same major version for 5, 6).
:param url: Elasticsearch url hosting Kibiter indices
:returns: major version, as string
"""
try:
res = self.grimoire_con.get(url)
res.raise_for_status()
major = res.json()['version']['number'].split(".")[0]
except Exception:
logger.error("Error retrieving Elasticsearch version: " + url)
raise
return major | def function[es_version, parameter[self, url]]:
constant[Get Elasticsearch version.
Get the version of Elasticsearch. This is useful because
Elasticsearch and Kibiter are paired (same major version for 5, 6).
:param url: Elasticsearch url hosting Kibiter indices
:returns: major version, as string
]
<ast.Try object at 0x7da1b009f3d0>
return[name[major]] | keyword[def] identifier[es_version] ( identifier[self] , identifier[url] ):
literal[string]
keyword[try] :
identifier[res] = identifier[self] . identifier[grimoire_con] . identifier[get] ( identifier[url] )
identifier[res] . identifier[raise_for_status] ()
identifier[major] = identifier[res] . identifier[json] ()[ literal[string] ][ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[error] ( literal[string] + identifier[url] )
keyword[raise]
keyword[return] identifier[major] | def es_version(self, url):
"""Get Elasticsearch version.
Get the version of Elasticsearch. This is useful because
Elasticsearch and Kibiter are paired (same major version for 5, 6).
:param url: Elasticsearch url hosting Kibiter indices
:returns: major version, as string
"""
try:
res = self.grimoire_con.get(url)
res.raise_for_status()
major = res.json()['version']['number'].split('.')[0] # depends on [control=['try'], data=[]]
except Exception:
logger.error('Error retrieving Elasticsearch version: ' + url)
raise # depends on [control=['except'], data=[]]
return major |
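A short sketch of calling the probe above from its owning object; 'panels' stands in for whatever class provides grimoire_con as a requests-like session.
# 'panels' is illustrative: any instance exposing grimoire_con and this method.
major = panels.es_version('http://localhost:9200')
if major not in ('5', '6'):
    logger.warning('unexpected Elasticsearch major version: %s', major)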
def get_modules(modulepath):
"""return all found modules at modulepath (eg, foo.bar) including modulepath module"""
m = importlib.import_module(modulepath)
mpath = m.__file__
ret = set([m])
if "__init__." in mpath.lower():
mpath = os.path.dirname(mpath)
# https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules
for module_info in pkgutil.iter_modules([mpath]):
submodulepath = ".".join([modulepath, module_info[1]])
if module_info[2]:
# module is a package
submodules = get_modules(submodulepath)
ret.update(submodules)
else:
ret.add(importlib.import_module(submodulepath))
return ret | def function[get_modules, parameter[modulepath]]:
constant[Return all found modules at modulepath (e.g., foo.bar), including the modulepath module itself]
variable[m] assign[=] call[name[importlib].import_module, parameter[name[modulepath]]]
variable[mpath] assign[=] name[m].__file__
variable[ret] assign[=] call[name[set], parameter[list[[<ast.Name object at 0x7da18ede6350>]]]]
if compare[constant[__init__.] in call[name[mpath].lower, parameter[]]] begin[:]
variable[mpath] assign[=] call[name[os].path.dirname, parameter[name[mpath]]]
for taget[name[module_info]] in starred[call[name[pkgutil].iter_modules, parameter[list[[<ast.Name object at 0x7da18ede4220>]]]]] begin[:]
variable[submodulepath] assign[=] call[constant[.].join, parameter[list[[<ast.Name object at 0x7da18ede7310>, <ast.Subscript object at 0x7da18ede6fb0>]]]]
if call[name[module_info]][constant[2]] begin[:]
variable[submodules] assign[=] call[name[get_modules], parameter[name[submodulepath]]]
call[name[ret].update, parameter[name[submodules]]]
return[name[ret]] | keyword[def] identifier[get_modules] ( identifier[modulepath] ):
literal[string]
identifier[m] = identifier[importlib] . identifier[import_module] ( identifier[modulepath] )
identifier[mpath] = identifier[m] . identifier[__file__]
identifier[ret] = identifier[set] ([ identifier[m] ])
keyword[if] literal[string] keyword[in] identifier[mpath] . identifier[lower] ():
identifier[mpath] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[mpath] )
keyword[for] identifier[module_info] keyword[in] identifier[pkgutil] . identifier[iter_modules] ([ identifier[mpath] ]):
identifier[submodulepath] = literal[string] . identifier[join] ([ identifier[modulepath] , identifier[module_info] [ literal[int] ]])
keyword[if] identifier[module_info] [ literal[int] ]:
identifier[submodules] = identifier[get_modules] ( identifier[submodulepath] )
identifier[ret] . identifier[update] ( identifier[submodules] )
keyword[else] :
identifier[ret] . identifier[add] ( identifier[importlib] . identifier[import_module] ( identifier[submodulepath] ))
keyword[return] identifier[ret] | def get_modules(modulepath):
"""return all found modules at modulepath (eg, foo.bar) including modulepath module"""
m = importlib.import_module(modulepath)
mpath = m.__file__
ret = set([m])
if '__init__.' in mpath.lower():
mpath = os.path.dirname(mpath)
# https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules
for module_info in pkgutil.iter_modules([mpath]):
submodulepath = '.'.join([modulepath, module_info[1]])
if module_info[2]:
# module is a package
submodules = get_modules(submodulepath)
ret.update(submodules) # depends on [control=['if'], data=[]]
else:
ret.add(importlib.import_module(submodulepath)) # depends on [control=['for'], data=['module_info']] # depends on [control=['if'], data=[]]
return ret |
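A quick demo of the recursive discovery above, run against the stdlib `email` package (which contains the nested `email.mime` subpackage); `get_modules` needs `importlib`, `os`, and `pkgutil` imported in its module:
import importlib
import os
import pkgutil

mods = get_modules("email")
print(sorted(m.__name__ for m in mods)[:5])  # first few discovered module names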
def monitor_key_get(service, key):
"""
Gets the value of an existing key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for.
:return: Returns the value of that key or None if not found.
"""
try:
output = check_output(
['ceph', '--id', service,
'config-key', 'get', str(key)]).decode('UTF-8')
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
e.output))
return None | def function[monitor_key_get, parameter[service, key]]:
constant[
Gets the value of an existing key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for.
:return: Returns the value of that key or None if not found.
]
<ast.Try object at 0x7da18bc70e80> | keyword[def] identifier[monitor_key_get] ( identifier[service] , identifier[key] ):
literal[string]
keyword[try] :
identifier[output] = identifier[check_output] (
[ literal[string] , literal[string] , identifier[service] ,
literal[string] , literal[string] , identifier[str] ( identifier[key] )]). identifier[decode] ( literal[string] )
keyword[return] identifier[output]
keyword[except] identifier[CalledProcessError] keyword[as] identifier[e] :
identifier[log] ( literal[string] . identifier[format] (
identifier[e] . identifier[output] ))
keyword[return] keyword[None] | def monitor_key_get(service, key):
"""
Gets the value of an existing key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for.
:return: Returns the value of that key or None if not found.
"""
try:
output = check_output(['ceph', '--id', service, 'config-key', 'get', str(key)]).decode('UTF-8')
return output # depends on [control=['try'], data=[]]
except CalledProcessError as e:
log('Monitor config-key get failed with message: {}'.format(e.output))
return None # depends on [control=['except'], data=['e']] |
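A hypothetical invocation on a machine with Ceph installed; `check_output`/`CalledProcessError` and a `log` helper are assumed to come from the surrounding charm module, sketched here with stdlib stand-ins:
from subprocess import CalledProcessError, check_output

def log(message):
    print(message)  # stand-in for the charm logger assumed above

fsid = monitor_key_get(service="admin", key="fsid")
if fsid is None:
    log("config-key 'fsid' not set (or monitor unreachable)")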
def _openResources(self):
""" Evaluates the function to result an array
"""
arr = self._fun()
check_is_an_array(arr)
self._array = arr | def function[_openResources, parameter[self]]:
constant[ Evaluates the function to produce an array
]
variable[arr] assign[=] call[name[self]._fun, parameter[]]
call[name[check_is_an_array], parameter[name[arr]]]
name[self]._array assign[=] name[arr] | keyword[def] identifier[_openResources] ( identifier[self] ):
literal[string]
identifier[arr] = identifier[self] . identifier[_fun] ()
identifier[check_is_an_array] ( identifier[arr] )
identifier[self] . identifier[_array] = identifier[arr] | def _openResources(self):
""" Evaluates the function to result an array
"""
arr = self._fun()
check_is_an_array(arr)
self._array = arr |
def poll(self):
"""return pairs of package indices and results of finished tasks
This method does not wait for tasks to finish.
Returns
-------
list
A list of pairs of package indices and results
"""
self.runid_to_return.extend(self.dispatcher.poll())
ret = self._collect_all_finished_pkgidx_result_pairs()
return ret | def function[poll, parameter[self]]:
constant[return pairs of package indices and results of finished tasks
This method does not wait for tasks to finish.
Returns
-------
list
A list of pairs of package indices and results
]
call[name[self].runid_to_return.extend, parameter[call[name[self].dispatcher.poll, parameter[]]]]
variable[ret] assign[=] call[name[self]._collect_all_finished_pkgidx_result_pairs, parameter[]]
return[name[ret]] | keyword[def] identifier[poll] ( identifier[self] ):
literal[string]
identifier[self] . identifier[runid_to_return] . identifier[extend] ( identifier[self] . identifier[dispatcher] . identifier[poll] ())
identifier[ret] = identifier[self] . identifier[_collect_all_finished_pkgidx_result_pairs] ()
keyword[return] identifier[ret] | def poll(self):
"""return pairs of package indices and results of finished tasks
This method does not wait for tasks to finish.
Returns
-------
list
A list of pairs of package indices and results
"""
self.runid_to_return.extend(self.dispatcher.poll())
ret = self._collect_all_finished_pkgidx_result_pairs()
return ret |
def time_multi_coincidence(times, slide_step=0, slop=.003,
pivot='H1', fixed='L1'):
""" Find multi detector concidences.
Parameters
----------
times: dict of numpy.ndarrays
Dictionary keyed by ifo of the times of each single detector trigger.
slide_step: float
The interval between time slides
slop: float
The amount of time to add to the TOF between detectors for coincidence
pivot: str
ifo used to test coincidence against in first stage
fixed: str
the other ifo used in the first stage coincidence which we'll use
as a fixed time reference for coincident triggers. All other detectors
are time slid by being fixed to this detector.
"""
# pivots are used to determine standard coincidence triggers, we then
# pair off additional detectors to those.
def win(ifo1, ifo2):
d1 = Detector(ifo1)
d2 = Detector(ifo2)
return d1.light_travel_time_to_detector(d2) + slop
# Find coincs first between the two fully time-slid detectors
pivot_id, fix_id, slide = time_coincidence(times[pivot], times[fixed],
win(pivot, fixed),
slide_step=slide_step)
# additional detectors do not slide independently of the fixed one
# Each trigger in an additional detector must be coincident with an
# existing coincident one. All times moved to 'fixed' relative time
fixed_time = times[fixed][fix_id]
pivot_time = times[pivot][pivot_id] - slide_step * slide
ctimes = {fixed: fixed_time, pivot:pivot_time}
ids = {fixed:fix_id, pivot:pivot_id}
dep_ifos = [ifo for ifo in times.keys() if ifo != fixed and ifo != pivot]
for ifo1 in dep_ifos:
otime = times[ifo1]
sort = times[ifo1].argsort()
time = otime[sort]
# Find coincidences between dependent ifo triggers and existing coinc.
for ifo2 in ids.keys():
# Currently assumes that additional detectors do not slide
# independently of the 'fixed one'
#
# To modify that assumption, the code here would be modified
# by adding a function that remaps the coinc time frame and unmaps
# it at the end of this loop.
# This remapping must ensure
# * function of the standard slide number
# * ensure all times remain within coincident segment
# * unbiased distribution of triggers after mapping.
w = win(ifo1, ifo2)
left = numpy.searchsorted(time, ctimes[ifo2] - w)
right = numpy.searchsorted(time, ctimes[ifo2] + w)
# remove elements that will not form a coinc
# There is at most one trigger for an existing coinc
# (assumes triggers spaced > slide step)
nz = (right - left).nonzero()
dep_ids = left[nz]
# The property that only one trigger can be within the window is ensured
# by the peak finding algorithm we use for each template.
# If that is modified, this function may need to be
# extended.
if len(left) > 0 and (right - left).max() > 1:
raise ValueError('Somehow triggers are closer than time-delay window')
slide = slide[nz]
for ifo in ctimes:
ctimes[ifo] = ctimes[ifo][nz]
ids[ifo] = ids[ifo][nz]
# Add this detector now to the cumulative set and proceed to the next
# ifo coincidence test
ids[ifo1] = sort[dep_ids]
ctimes[ifo1] = otime[ids[ifo1]]
return ids, slide | def function[time_multi_coincidence, parameter[times, slide_step, slop, pivot, fixed]]:
constant[ Find multi detector coincidences.
Parameters
----------
times: dict of numpy.ndarrays
Dictionary keyed by ifo of the times of each single detector trigger.
slide_step: float
The interval between time slides
slop: float
The amount of time to add to the TOF between detectors for coincidence
pivot: str
ifo used to test coincidence against in first stage
fixed: str
the other ifo used in the first stage coincidence which we'll use
as a fixed time reference for coincident triggers. All other detectors
are time slid by being fixed to this detector.
]
def function[win, parameter[ifo1, ifo2]]:
variable[d1] assign[=] call[name[Detector], parameter[name[ifo1]]]
variable[d2] assign[=] call[name[Detector], parameter[name[ifo2]]]
return[binary_operation[call[name[d1].light_travel_time_to_detector, parameter[name[d2]]] + name[slop]]]
<ast.Tuple object at 0x7da2054a6440> assign[=] call[name[time_coincidence], parameter[call[name[times]][name[pivot]], call[name[times]][name[fixed]], call[name[win], parameter[name[pivot], name[fixed]]]]]
variable[fixed_time] assign[=] call[call[name[times]][name[fixed]]][name[fix_id]]
variable[pivot_time] assign[=] binary_operation[call[call[name[times]][name[pivot]]][name[pivot_id]] - binary_operation[name[slide_step] * name[slide]]]
variable[ctimes] assign[=] dictionary[[<ast.Name object at 0x7da2054a6c20>, <ast.Name object at 0x7da2054a6a40>], [<ast.Name object at 0x7da2054a60e0>, <ast.Name object at 0x7da2054a7f70>]]
variable[ids] assign[=] dictionary[[<ast.Name object at 0x7da2054a5bd0>, <ast.Name object at 0x7da2054a4160>], [<ast.Name object at 0x7da2054a43d0>, <ast.Name object at 0x7da2054a4730>]]
variable[dep_ifos] assign[=] <ast.ListComp object at 0x7da2054a47f0>
for taget[name[ifo1]] in starred[name[dep_ifos]] begin[:]
variable[otime] assign[=] call[name[times]][name[ifo1]]
variable[sort] assign[=] call[call[name[times]][name[ifo1]].argsort, parameter[]]
variable[time] assign[=] call[name[otime]][name[sort]]
for taget[name[ifo2]] in starred[call[name[ids].keys, parameter[]]] begin[:]
variable[w] assign[=] call[name[win], parameter[name[ifo1], name[ifo2]]]
variable[left] assign[=] call[name[numpy].searchsorted, parameter[name[time], binary_operation[call[name[ctimes]][name[ifo2]] - name[w]]]]
variable[right] assign[=] call[name[numpy].searchsorted, parameter[name[time], binary_operation[call[name[ctimes]][name[ifo2]] + name[w]]]]
variable[nz] assign[=] call[binary_operation[name[right] - name[left]].nonzero, parameter[]]
variable[dep_ids] assign[=] call[name[left]][name[nz]]
if <ast.BoolOp object at 0x7da2054a4220> begin[:]
<ast.Raise object at 0x7da2054a5ff0>
variable[slide] assign[=] call[name[slide]][name[nz]]
for taget[name[ifo]] in starred[name[ctimes]] begin[:]
call[name[ctimes]][name[ifo]] assign[=] call[call[name[ctimes]][name[ifo]]][name[nz]]
call[name[ids]][name[ifo]] assign[=] call[call[name[ids]][name[ifo]]][name[nz]]
call[name[ids]][name[ifo1]] assign[=] call[name[sort]][name[dep_ids]]
call[name[ctimes]][name[ifo1]] assign[=] call[name[otime]][call[name[ids]][name[ifo1]]]
return[tuple[[<ast.Name object at 0x7da20c6aa980>, <ast.Name object at 0x7da20c6abca0>]]] | keyword[def] identifier[time_multi_coincidence] ( identifier[times] , identifier[slide_step] = literal[int] , identifier[slop] = literal[int] ,
identifier[pivot] = literal[string] , identifier[fixed] = literal[string] ):
literal[string]
keyword[def] identifier[win] ( identifier[ifo1] , identifier[ifo2] ):
identifier[d1] = identifier[Detector] ( identifier[ifo1] )
identifier[d2] = identifier[Detector] ( identifier[ifo2] )
keyword[return] identifier[d1] . identifier[light_travel_time_to_detector] ( identifier[d2] )+ identifier[slop]
identifier[pivot_id] , identifier[fix_id] , identifier[slide] = identifier[time_coincidence] ( identifier[times] [ identifier[pivot] ], identifier[times] [ identifier[fixed] ],
identifier[win] ( identifier[pivot] , identifier[fixed] ),
identifier[slide_step] = identifier[slide_step] )
identifier[fixed_time] = identifier[times] [ identifier[fixed] ][ identifier[fix_id] ]
identifier[pivot_time] = identifier[times] [ identifier[pivot] ][ identifier[pivot_id] ]- identifier[slide_step] * identifier[slide]
identifier[ctimes] ={ identifier[fixed] : identifier[fixed_time] , identifier[pivot] : identifier[pivot_time] }
identifier[ids] ={ identifier[fixed] : identifier[fix_id] , identifier[pivot] : identifier[pivot_id] }
identifier[dep_ifos] =[ identifier[ifo] keyword[for] identifier[ifo] keyword[in] identifier[times] . identifier[keys] () keyword[if] identifier[ifo] != identifier[fixed] keyword[and] identifier[ifo] != identifier[pivot] ]
keyword[for] identifier[ifo1] keyword[in] identifier[dep_ifos] :
identifier[otime] = identifier[times] [ identifier[ifo1] ]
identifier[sort] = identifier[times] [ identifier[ifo1] ]. identifier[argsort] ()
identifier[time] = identifier[otime] [ identifier[sort] ]
keyword[for] identifier[ifo2] keyword[in] identifier[ids] . identifier[keys] ():
identifier[w] = identifier[win] ( identifier[ifo1] , identifier[ifo2] )
identifier[left] = identifier[numpy] . identifier[searchsorted] ( identifier[time] , identifier[ctimes] [ identifier[ifo2] ]- identifier[w] )
identifier[right] = identifier[numpy] . identifier[searchsorted] ( identifier[time] , identifier[ctimes] [ identifier[ifo2] ]+ identifier[w] )
identifier[nz] =( identifier[right] - identifier[left] ). identifier[nonzero] ()
identifier[dep_ids] = identifier[left] [ identifier[nz] ]
keyword[if] identifier[len] ( identifier[left] )> literal[int] keyword[and] ( identifier[right] - identifier[left] ). identifier[max] ()> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[slide] = identifier[slide] [ identifier[nz] ]
keyword[for] identifier[ifo] keyword[in] identifier[ctimes] :
identifier[ctimes] [ identifier[ifo] ]= identifier[ctimes] [ identifier[ifo] ][ identifier[nz] ]
identifier[ids] [ identifier[ifo] ]= identifier[ids] [ identifier[ifo] ][ identifier[nz] ]
identifier[ids] [ identifier[ifo1] ]= identifier[sort] [ identifier[dep_ids] ]
identifier[ctimes] [ identifier[ifo1] ]= identifier[otime] [ identifier[ids] [ identifier[ifo1] ]]
keyword[return] identifier[ids] , identifier[slide] | def time_multi_coincidence(times, slide_step=0, slop=0.003, pivot='H1', fixed='L1'):
""" Find multi detector concidences.
Parameters
----------
times: dict of numpy.ndarrays
Dictionary keyed by ifo of the times of each single detector trigger.
slide_step: float
The interval between time slides
slop: float
The amount of time to add to the TOF between detectors for coincidence
pivot: str
ifo used to test coincidence against in first stage
fixed: str
the other ifo used in the first stage coincidence which we'll use
as a fixed time reference for coincident triggers. All other detectors
are time slid by being fixed to this detector.
"""
# pivots are used to determine standard coincidence triggers, we then
# pair off additional detectors to those.
def win(ifo1, ifo2):
d1 = Detector(ifo1)
d2 = Detector(ifo2)
return d1.light_travel_time_to_detector(d2) + slop
# Find coincs first between the two fully time-slid detectors
(pivot_id, fix_id, slide) = time_coincidence(times[pivot], times[fixed], win(pivot, fixed), slide_step=slide_step)
# additional detectors do not slide independently of the fixed one
# Each trigger in an additional detector must be coincident with an
# existing coincident one. All times moved to 'fixed' relative time
fixed_time = times[fixed][fix_id]
pivot_time = times[pivot][pivot_id] - slide_step * slide
ctimes = {fixed: fixed_time, pivot: pivot_time}
ids = {fixed: fix_id, pivot: pivot_id}
dep_ifos = [ifo for ifo in times.keys() if ifo != fixed and ifo != pivot]
for ifo1 in dep_ifos:
otime = times[ifo1]
sort = times[ifo1].argsort()
time = otime[sort]
# Find coincidences between dependent ifo triggers and existing coinc.
for ifo2 in ids.keys():
# Currently assumes that additional detectors do not slide
# independently of the 'fixed one'
#
# To modify that assumption, the code here would be modified
# by adding a function that remaps the coinc time frame and unmaps
# it at the end of this loop.
# This remapping must ensure
# * function of the standard slide number
# * ensure all times remain within coincident segment
# * unbiased distribution of triggers after mapping.
w = win(ifo1, ifo2)
left = numpy.searchsorted(time, ctimes[ifo2] - w)
right = numpy.searchsorted(time, ctimes[ifo2] + w)
# remove elements that will not form a coinc
# There is at most one trigger for an existing coinc
# (assumes triggers spaced > slide step)
nz = (right - left).nonzero()
dep_ids = left[nz]
# The property that only one trigger can be within the window is ensured
# by the peak finding algorithm we use for each template.
# If that is modifed, this function may need to be
# extended.
if len(left) > 0 and (right - left).max() > 1:
raise ValueError('Somehow triggers are closer than time-delay window') # depends on [control=['if'], data=[]]
slide = slide[nz]
for ifo in ctimes:
ctimes[ifo] = ctimes[ifo][nz]
ids[ifo] = ids[ifo][nz] # depends on [control=['for'], data=['ifo']] # depends on [control=['for'], data=['ifo2']]
# Add this detector now to the cumulative set and proceed to the next
# ifo coincidence test
ids[ifo1] = sort[dep_ids]
ctimes[ifo1] = otime[ids[ifo1]] # depends on [control=['for'], data=['ifo1']]
return (ids, slide) |
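A synthetic zero-lag example, assuming `numpy`, `Detector`, and `time_coincidence` are importable from the surrounding PyCBC module; the trigger times below are invented and constructed to lie within the light-travel windows:
import numpy

times = {
    'H1': numpy.array([100.000, 250.0]),   # seconds; values are made up
    'L1': numpy.array([100.005, 400.0]),
    'V1': numpy.array([100.010]),
}
ids, slide = time_multi_coincidence(times, slide_step=0, slop=.003,
                                    pivot='H1', fixed='L1')
# ids['H1'], ids['L1'], ids['V1'] hold the indices of the one triple
# coincidence around t=100; with slide_step=0 every slide entry is 0.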
def info_dialog(self, title="Information", message="", **kwargs):
"""
Show an information dialog
Usage: C{dialog.info_dialog(title="Information", message="", **kwargs)}
@param title: window title for the dialog
@param message: message displayed in the dialog
@return: a tuple containing the exit code and user input
@rtype: C{tuple(int, str)}
"""
return self._run_zenity(title, ["--info", "--text", message], kwargs) | def function[info_dialog, parameter[self, title, message]]:
constant[
Show an information dialog
Usage: C{dialog.info_dialog(title="Information", message="", **kwargs)}
@param title: window title for the dialog
@param message: message displayed in the dialog
@return: a tuple containing the exit code and user input
@rtype: C{tuple(int, str)}
]
return[call[name[self]._run_zenity, parameter[name[title], list[[<ast.Constant object at 0x7da20cabeef0>, <ast.Constant object at 0x7da20cabf580>, <ast.Name object at 0x7da20cabc7f0>]], name[kwargs]]]] | keyword[def] identifier[info_dialog] ( identifier[self] , identifier[title] = literal[string] , identifier[message] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_run_zenity] ( identifier[title] ,[ literal[string] , literal[string] , identifier[message] ], identifier[kwargs] ) | def info_dialog(self, title='Information', message='', **kwargs):
"""
Show an information dialog
Usage: C{dialog.info_dialog(title="Information", message="", **kwargs)}
@param title: window title for the dialog
@param message: message displayed in the dialog
@return: a tuple containing the exit code and user input
@rtype: C{tuple(int, str)}
"""
return self._run_zenity(title, ['--info', '--text', message], kwargs) |
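An illustrative call, assuming `dialog` is an instance of the class that defines `info_dialog` and that zenity is installed; unpacking follows the docstring's return contract:
retcode, output = dialog.info_dialog(title="Backup",
                                     message="Backup finished")
if retcode == 0:
    print("user acknowledged the dialog")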
def image(random=random, width=800, height=600, https=False, *args, **kwargs):
"""
Generate the address of a placeholder image.
>>> mock_random.seed(0)
>>> image(random=mock_random)
'http://dummyimage.com/800x600/292929/e3e3e3&text=mighty poop'
>>> image(random=mock_random, width=60, height=60)
'http://placekitten.com/60/60'
>>> image(random=mock_random, width=1920, height=1080)
'http://dummyimage.com/1920x1080/292929/e3e3e3&text=To get to Westeros, you need to go to Britchestown, then drive west.'
>>> image(random=mock_random, https=True, width=1920, height=1080)
'https://dummyimage.com/1920x1080/292929/e3e3e3&text=East Mysteryhall is in Westeros.'
"""
target_fn = noun
if width+height > 300:
target_fn = thing
if width+height > 2000:
target_fn = sentence
s = ""
if https:
s = "s"
if random.choice([True, False]):
return "http{s}://dummyimage.com/{width}x{height}/292929/e3e3e3&text={text}".format(
s=s,
width=width,
height=height,
text=target_fn(random=random))
else:
return "http{s}://placekitten.com/{width}/{height}".format(s=s, width=width, height=height) | def function[image, parameter[random, width, height, https]]:
constant[
Generate the address of a placeholder image.
>>> mock_random.seed(0)
>>> image(random=mock_random)
'http://dummyimage.com/800x600/292929/e3e3e3&text=mighty poop'
>>> image(random=mock_random, width=60, height=60)
'http://placekitten.com/60/60'
>>> image(random=mock_random, width=1920, height=1080)
'http://dummyimage.com/1920x1080/292929/e3e3e3&text=To get to Westeros, you need to go to Britchestown, then drive west.'
>>> image(random=mock_random, https=True, width=1920, height=1080)
'https://dummyimage.com/1920x1080/292929/e3e3e3&text=East Mysteryhall is in Westeros.'
]
variable[target_fn] assign[=] name[noun]
if compare[binary_operation[name[width] + name[height]] greater[>] constant[300]] begin[:]
variable[target_fn] assign[=] name[thing]
if compare[binary_operation[name[width] + name[height]] greater[>] constant[2000]] begin[:]
variable[target_fn] assign[=] name[sentence]
variable[s] assign[=] constant[]
if name[https] begin[:]
variable[s] assign[=] constant[s]
if call[name[random].choice, parameter[list[[<ast.Constant object at 0x7da1b0c4dfc0>, <ast.Constant object at 0x7da1b0c4eb90>]]]] begin[:]
return[call[constant[http{s}://dummyimage.com/{width}x{height}/292929/e3e3e3&text={text}].format, parameter[]]] | keyword[def] identifier[image] ( identifier[random] = identifier[random] , identifier[width] = literal[int] , identifier[height] = literal[int] , identifier[https] = keyword[False] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[target_fn] = identifier[noun]
keyword[if] identifier[width] + identifier[height] > literal[int] :
identifier[target_fn] = identifier[thing]
keyword[if] identifier[width] + identifier[height] > literal[int] :
identifier[target_fn] = identifier[sentence]
identifier[s] = literal[string]
keyword[if] identifier[https] :
identifier[s] = literal[string]
keyword[if] identifier[random] . identifier[choice] ([ keyword[True] , keyword[False] ]):
keyword[return] literal[string] . identifier[format] (
identifier[s] = identifier[s] ,
identifier[width] = identifier[width] ,
identifier[height] = identifier[height] ,
identifier[text] = identifier[target_fn] ( identifier[random] = identifier[random] ))
keyword[else] :
keyword[return] literal[string] . identifier[format] ( identifier[s] = identifier[s] , identifier[width] = identifier[width] , identifier[height] = identifier[height] ) | def image(random=random, width=800, height=600, https=False, *args, **kwargs):
"""
Generate the address of a placeholder image.
>>> mock_random.seed(0)
>>> image(random=mock_random)
'http://dummyimage.com/800x600/292929/e3e3e3&text=mighty poop'
>>> image(random=mock_random, width=60, height=60)
'http://placekitten.com/60/60'
>>> image(random=mock_random, width=1920, height=1080)
'http://dummyimage.com/1920x1080/292929/e3e3e3&text=To get to Westeros, you need to go to Britchestown, then drive west.'
>>> image(random=mock_random, https=True, width=1920, height=1080)
'https://dummyimage.com/1920x1080/292929/e3e3e3&text=East Mysteryhall is in Westeros.'
"""
target_fn = noun
if width + height > 300:
target_fn = thing # depends on [control=['if'], data=[]]
if width + height > 2000:
target_fn = sentence # depends on [control=['if'], data=[]]
s = ''
if https:
s = 's' # depends on [control=['if'], data=[]]
if random.choice([True, False]):
return 'http{s}://dummyimage.com/{width}x{height}/292929/e3e3e3&text={text}'.format(s=s, width=width, height=height, text=target_fn(random=random)) # depends on [control=['if'], data=[]]
else:
return 'http{s}://placekitten.com/{width}/{height}'.format(s=s, width=width, height=height) |
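Two illustrative calls; with the real `random` module (rather than the seeded `mock_random` of the doctests) the chosen service and text vary per run, and the `noun`/`thing`/`sentence` helpers are assumed importable alongside the function:
import random

print(image(random=random, width=60, height=60))
print(image(random=random, width=1920, height=1080, https=True))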
def select_best_block(blocks):
"""Return best block selected based on simple heuristic."""
# TODO make this cleverer with more stats
if not blocks:
raise ValueError("No suitable blocks were found in assembly.")
best_block = max(blocks, key=lambda b: b[1]['packed_instr'])
if best_block[1]['packed_instr'] == 0:
best_block = max(blocks,
key=lambda b: (b[1]['ops'] + b[1]['packed_instr'] + b[1]['avx_instr'],
b[1]['ZMM'], b[1]['YMM'], b[1]['XMM']))
return best_block[0] | def function[select_best_block, parameter[blocks]]:
constant[Return best block selected based on simple heuristic.]
if <ast.UnaryOp object at 0x7da20c6abac0> begin[:]
<ast.Raise object at 0x7da20c6a89d0>
variable[best_block] assign[=] call[name[max], parameter[name[blocks]]]
if compare[call[call[name[best_block]][constant[1]]][constant[packed_instr]] equal[==] constant[0]] begin[:]
variable[best_block] assign[=] call[name[max], parameter[name[blocks]]]
return[call[name[best_block]][constant[0]]] | keyword[def] identifier[select_best_block] ( identifier[blocks] ):
literal[string]
keyword[if] keyword[not] identifier[blocks] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[best_block] = identifier[max] ( identifier[blocks] , identifier[key] = keyword[lambda] identifier[b] : identifier[b] [ literal[int] ][ literal[string] ])
keyword[if] identifier[best_block] [ literal[int] ][ literal[string] ]== literal[int] :
identifier[best_block] = identifier[max] ( identifier[blocks] ,
identifier[key] = keyword[lambda] identifier[b] :( identifier[b] [ literal[int] ][ literal[string] ]+ identifier[b] [ literal[int] ][ literal[string] ]+ identifier[b] [ literal[int] ][ literal[string] ],
identifier[b] [ literal[int] ][ literal[string] ], identifier[b] [ literal[int] ][ literal[string] ], identifier[b] [ literal[int] ][ literal[string] ]))
keyword[return] identifier[best_block] [ literal[int] ] | def select_best_block(blocks):
"""Return best block selected based on simple heuristic."""
# TODO make this cleverer with more stats
if not blocks:
raise ValueError('No suitable blocks were found in assembly.') # depends on [control=['if'], data=[]]
best_block = max(blocks, key=lambda b: b[1]['packed_instr'])
if best_block[1]['packed_instr'] == 0:
best_block = max(blocks, key=lambda b: (b[1]['ops'] + b[1]['packed_instr'] + b[1]['avx_instr'], b[1]['ZMM'], b[1]['YMM'], b[1]['XMM'])) # depends on [control=['if'], data=[]]
return best_block[0] |
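A toy input showing the expected shape: (label, stats) pairs like those produced by the surrounding assembly analysis; the numbers are invented:
blocks = [
    ('.L1', {'ops': 12, 'packed_instr': 0, 'avx_instr': 2,
             'ZMM': 0, 'YMM': 1, 'XMM': 3}),
    ('.L2', {'ops': 20, 'packed_instr': 4, 'avx_instr': 6,
             'ZMM': 0, 'YMM': 4, 'XMM': 2}),
]
assert select_best_block(blocks) == '.L2'  # most packed instructions wins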
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices | def function[compute_index, parameter[self, axis, data_object, compute_diff]]:
constant[Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
]
def function[pandas_index_extraction, parameter[df, axis]]:
if <ast.UnaryOp object at 0x7da1b2346fb0> begin[:]
return[name[df].index]
variable[index_obj] assign[=] <ast.IfExp object at 0x7da1b23463b0>
variable[old_blocks] assign[=] <ast.IfExp object at 0x7da1b2346cb0>
variable[new_indices] assign[=] call[name[data_object].get_indices, parameter[]]
return[<ast.IfExp object at 0x7da1b2344190>] | keyword[def] identifier[compute_index] ( identifier[self] , identifier[axis] , identifier[data_object] , identifier[compute_diff] = keyword[True] ):
literal[string]
keyword[def] identifier[pandas_index_extraction] ( identifier[df] , identifier[axis] ):
keyword[if] keyword[not] identifier[axis] :
keyword[return] identifier[df] . identifier[index]
keyword[else] :
keyword[try] :
keyword[return] identifier[df] . identifier[columns]
keyword[except] identifier[AttributeError] :
keyword[return] identifier[pandas] . identifier[Index] ([])
identifier[index_obj] = identifier[self] . identifier[index] keyword[if] keyword[not] identifier[axis] keyword[else] identifier[self] . identifier[columns]
identifier[old_blocks] = identifier[self] . identifier[data] keyword[if] identifier[compute_diff] keyword[else] keyword[None]
identifier[new_indices] = identifier[data_object] . identifier[get_indices] (
identifier[axis] = identifier[axis] ,
identifier[index_func] = keyword[lambda] identifier[df] : identifier[pandas_index_extraction] ( identifier[df] , identifier[axis] ),
identifier[old_blocks] = identifier[old_blocks] ,
)
keyword[return] identifier[index_obj] [ identifier[new_indices] ] keyword[if] identifier[compute_diff] keyword[else] identifier[new_indices] | def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index # depends on [control=['if'], data=[]]
else:
try:
return df.columns # depends on [control=['try'], data=[]]
except AttributeError:
return pandas.Index([]) # depends on [control=['except'], data=[]]
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(axis=axis, index_func=lambda df: pandas_index_extraction(df, axis), old_blocks=old_blocks)
return index_obj[new_indices] if compute_diff else new_indices |
def dump(self, f, indent=''):
"""Dump this keyword to a file-like object"""
if self.__unit is None:
print(("%s%s %s" % (indent, self.__name, self.__value)).rstrip(), file=f)
else:
print(("%s%s [%s] %s" % (indent, self.__name, self.__unit, self.__value)).rstrip(), file=f) | def function[dump, parameter[self, f, indent]]:
constant[Dump this keyword to a file-like object]
if compare[name[self].__unit is constant[None]] begin[:]
call[name[print], parameter[call[binary_operation[constant[%s%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f02aa0>, <ast.Attribute object at 0x7da207f02ef0>, <ast.Attribute object at 0x7da207f00700>]]].rstrip, parameter[]]]] | keyword[def] identifier[dump] ( identifier[self] , identifier[f] , identifier[indent] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[__unit] keyword[is] keyword[None] :
identifier[print] (( literal[string] %( identifier[indent] , identifier[self] . identifier[__name] , identifier[self] . identifier[__value] )). identifier[rstrip] (), identifier[file] = identifier[f] )
keyword[else] :
identifier[print] (( literal[string] %( identifier[indent] , identifier[self] . identifier[__name] , identifier[self] . identifier[__unit] , identifier[self] . identifier[__value] )). identifier[rstrip] (), identifier[file] = identifier[f] ) | def dump(self, f, indent=''):
"""Dump this keyword to a file-like object"""
if self.__unit is None:
print(('%s%s %s' % (indent, self.__name, self.__value)).rstrip(), file=f) # depends on [control=['if'], data=[]]
else:
print(('%s%s [%s] %s' % (indent, self.__name, self.__unit, self.__value)).rstrip(), file=f) |
def isemptyfile(filepath):
"""Determine if the file both exists and isempty
Args:
filepath (str, path): file path
Returns:
bool
"""
exists = os.path.exists(safepath(filepath))
if exists:
filesize = os.path.getsize(safepath(filepath))
return filesize == 0
else:
return False | def function[isemptyfile, parameter[filepath]]:
constant[Determine if the file both exists and is empty
Args:
filepath (str, path): file path
Returns:
bool
]
variable[exists] assign[=] call[name[os].path.exists, parameter[call[name[safepath], parameter[name[filepath]]]]]
if name[exists] begin[:]
variable[filesize] assign[=] call[name[os].path.getsize, parameter[call[name[safepath], parameter[name[filepath]]]]]
return[compare[name[filesize] equal[==] constant[0]]] | keyword[def] identifier[isemptyfile] ( identifier[filepath] ):
literal[string]
identifier[exists] = identifier[os] . identifier[path] . identifier[exists] ( identifier[safepath] ( identifier[filepath] ))
keyword[if] identifier[exists] :
identifier[filesize] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[safepath] ( identifier[filepath] ))
keyword[return] identifier[filesize] == literal[int]
keyword[else] :
keyword[return] keyword[False] | def isemptyfile(filepath):
"""Determine if the file both exists and isempty
Args:
filepath (str, path): file path
Returns:
bool
"""
exists = os.path.exists(safepath(filepath))
if exists:
filesize = os.path.getsize(safepath(filepath))
return filesize == 0 # depends on [control=['if'], data=[]]
else:
return False |
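A self-contained check of both branches; `safepath` is assumed to be the surrounding module's path normalizer, stubbed here as the identity:
import os
import tempfile

def safepath(p):
    return p  # stand-in for the helper assumed by isemptyfile

fd, path = tempfile.mkstemp()
os.close(fd)                                 # file exists with size 0
assert isemptyfile(path) is True
assert isemptyfile(path + ".gone") is False  # nonexistent file
os.remove(path)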
def add_column(self, func, name=None, show=True):
"""Add a column function which takes an id as argument and
returns a value."""
assert func
name = name or func.__name__
if name == '<lambda>':
raise ValueError("Please provide a valid name for " + name)
d = {'func': func,
'show': show,
}
self._columns[name] = d
# Update the headers in the widget.
data = _create_json_dict(cols=self.column_names,
)
self.eval_js('table.setHeaders({});'.format(data))
return func | def function[add_column, parameter[self, func, name, show]]:
constant[Add a column function which takes an id as argument and
returns a value.]
assert[name[func]]
variable[name] assign[=] <ast.BoolOp object at 0x7da1b120b1f0>
if compare[name[name] equal[==] constant[<lambda>]] begin[:]
<ast.Raise object at 0x7da1b12093c0>
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da1b12083a0>, <ast.Constant object at 0x7da1b1209cc0>], [<ast.Name object at 0x7da1b120b910>, <ast.Name object at 0x7da1b1209c60>]]
call[name[self]._columns][name[name]] assign[=] name[d]
variable[data] assign[=] call[name[_create_json_dict], parameter[]]
call[name[self].eval_js, parameter[call[constant[table.setHeaders({});].format, parameter[name[data]]]]]
return[name[func]] | keyword[def] identifier[add_column] ( identifier[self] , identifier[func] , identifier[name] = keyword[None] , identifier[show] = keyword[True] ):
literal[string]
keyword[assert] identifier[func]
identifier[name] = identifier[name] keyword[or] identifier[func] . identifier[__name__]
keyword[if] identifier[name] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[name] )
identifier[d] ={ literal[string] : identifier[func] ,
literal[string] : identifier[show] ,
}
identifier[self] . identifier[_columns] [ identifier[name] ]= identifier[d]
identifier[data] = identifier[_create_json_dict] ( identifier[cols] = identifier[self] . identifier[column_names] ,
)
identifier[self] . identifier[eval_js] ( literal[string] . identifier[format] ( identifier[data] ))
keyword[return] identifier[func] | def add_column(self, func, name=None, show=True):
"""Add a column function which takes an id as argument and
returns a value."""
assert func
name = name or func.__name__
if name == '<lambda>':
raise ValueError('Please provide a valid name for ' + name) # depends on [control=['if'], data=['name']]
d = {'func': func, 'show': show}
self._columns[name] = d
# Update the headers in the widget.
data = _create_json_dict(cols=self.column_names)
self.eval_js('table.setHeaders({});'.format(data))
return func |
def predict(self, X):
"""Predict risk score of experiencing an event.
Higher scores indicate shorter survival (high risk),
lower scores longer survival (low risk).
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted risk.
"""
K = self._get_kernel(X, self.X_fit_)
pred = -numpy.dot(self.coef_, K.T)
return pred.ravel() | def function[predict, parameter[self, X]]:
constant[Predict risk score of experiencing an event.
Higher scores indicate shorter survival (high risk),
lower scores longer survival (low risk).
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted risk.
]
variable[K] assign[=] call[name[self]._get_kernel, parameter[name[X], name[self].X_fit_]]
variable[pred] assign[=] <ast.UnaryOp object at 0x7da1b17e2e90>
return[call[name[pred].ravel, parameter[]]] | keyword[def] identifier[predict] ( identifier[self] , identifier[X] ):
literal[string]
identifier[K] = identifier[self] . identifier[_get_kernel] ( identifier[X] , identifier[self] . identifier[X_fit_] )
identifier[pred] =- identifier[numpy] . identifier[dot] ( identifier[self] . identifier[coef_] , identifier[K] . identifier[T] )
keyword[return] identifier[pred] . identifier[ravel] () | def predict(self, X):
"""Predict risk score of experiencing an event.
Higher scores indicate shorter survival (high risk),
lower scores longer survival (low risk).
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted risk.
"""
K = self._get_kernel(X, self.X_fit_)
pred = -numpy.dot(self.coef_, K.T)
return pred.ravel() |
def update(self, id, name, incident_preference):
"""
This API endpoint allows you to update an alert policy
:type id: integer
:param id: The id of the policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
data = {
"policy": {
"name": name,
"incident_preference": incident_preference
}
}
return self._put(
url='{0}alerts_policies/{1}.json'.format(self.URL, id),
headers=self.headers,
data=data
) | def function[update, parameter[self, id, name, incident_preference]]:
constant[
This API endpoint allows you to update an alert policy
:type id: integer
:param id: The id of the policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d19060>], [<ast.Dict object at 0x7da1b0d185b0>]]
return[call[name[self]._put, parameter[]]] | keyword[def] identifier[update] ( identifier[self] , identifier[id] , identifier[name] , identifier[incident_preference] ):
literal[string]
identifier[data] ={
literal[string] :{
literal[string] : identifier[name] ,
literal[string] : identifier[incident_preference]
}
}
keyword[return] identifier[self] . identifier[_put] (
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[URL] , identifier[id] ),
identifier[headers] = identifier[self] . identifier[headers] ,
identifier[data] = identifier[data]
) | def update(self, id, name, incident_preference):
"""
This API endpoint allows you to update an alert policy
:type id: integer
:param id: The id of the policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
}
"""
data = {'policy': {'name': name, 'incident_preference': incident_preference}}
return self._put(url='{0}alerts_policies/{1}.json'.format(self.URL, id), headers=self.headers, data=data) |
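A hedged example against the New Relic v2 Alerts API; the client class name and API key are illustrative, and `self._put`/`self.headers`/`self.URL` are assumed to come from the wrapper's base class:
client = AlertPolicies(api_key="NRAK-example")  # hypothetical constructor
resp = client.update(id=12345,
                     name="Production latency policy",
                     incident_preference="PER_CONDITION")
print(resp["policy"]["updated_at"])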
def insert_taxon_in_new_fasta_file(self, aln):
"""primer4clades infers the codon usage table from the taxon names in the
sequences.
These names need to be enclosed by square brackets and be
present in the description of the FASTA sequence. The position is not
important. I will insert the names in the description in a new FASTA
file.
Returns:
Filename of modified FASTA file that includes the name of the taxon.
"""
new_seq_records = []
for seq_record in SeqIO.parse(aln, 'fasta'):
new_seq_record_id = "[{0}] {1}".format(self.taxon_for_codon_usage, seq_record.id)
new_seq_record = SeqRecord(seq_record.seq, id=new_seq_record_id)
new_seq_records.append(new_seq_record)
base_filename = os.path.splitext(aln)
new_filename = '{0}_modified{1}'.format(base_filename[0], base_filename[1])
SeqIO.write(new_seq_records, new_filename, "fasta")
return new_filename | def function[insert_taxon_in_new_fasta_file, parameter[self, aln]]:
constant[primer4clades infers the codon usage table from the taxon names in the
sequences.
These names need to be enclosed by square brackets and be
present in the description of the FASTA sequence. The position is not
important. I will insert the names in the description in a new FASTA
file.
Returns:
Filename of modified FASTA file that includes the name of the taxon.
]
variable[new_seq_records] assign[=] list[[]]
for taget[name[seq_record]] in starred[call[name[SeqIO].parse, parameter[name[aln], constant[fasta]]]] begin[:]
variable[new_seq_record_id] assign[=] call[constant[[{0}] {1}].format, parameter[name[self].taxon_for_codon_usage, name[seq_record].id]]
variable[new_seq_record] assign[=] call[name[SeqRecord], parameter[name[seq_record].seq]]
call[name[new_seq_records].append, parameter[name[new_seq_record]]]
variable[base_filename] assign[=] call[name[os].path.splitext, parameter[name[aln]]]
variable[new_filename] assign[=] call[constant[{0}_modified{1}].format, parameter[call[name[base_filename]][constant[0]], call[name[base_filename]][constant[1]]]]
call[name[SeqIO].write, parameter[name[new_seq_records], name[new_filename], constant[fasta]]]
return[name[new_filename]] | keyword[def] identifier[insert_taxon_in_new_fasta_file] ( identifier[self] , identifier[aln] ):
literal[string]
identifier[new_seq_records] =[]
keyword[for] identifier[seq_record] keyword[in] identifier[SeqIO] . identifier[parse] ( identifier[aln] , literal[string] ):
identifier[new_seq_record_id] = literal[string] . identifier[format] ( identifier[self] . identifier[taxon_for_codon_usage] , identifier[seq_record] . identifier[id] )
identifier[new_seq_record] = identifier[SeqRecord] ( identifier[seq_record] . identifier[seq] , identifier[id] = identifier[new_seq_record_id] )
identifier[new_seq_records] . identifier[append] ( identifier[new_seq_record] )
identifier[base_filename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[aln] )
identifier[new_filename] = literal[string] . identifier[format] ( identifier[base_filename] [ literal[int] ], identifier[base_filename] [ literal[int] ])
identifier[SeqIO] . identifier[write] ( identifier[new_seq_records] , identifier[new_filename] , literal[string] )
keyword[return] identifier[new_filename] | def insert_taxon_in_new_fasta_file(self, aln):
"""primer4clades infers the codon usage table from the taxon names in the
sequences.
These names need to be enclosed by square brackets and be
present in the description of the FASTA sequence. The position is not
important. I will insert the names in the description in a new FASTA
file.
Returns:
Filename of modified FASTA file that includes the name of the taxon.
"""
new_seq_records = []
for seq_record in SeqIO.parse(aln, 'fasta'):
new_seq_record_id = '[{0}] {1}'.format(self.taxon_for_codon_usage, seq_record.id)
new_seq_record = SeqRecord(seq_record.seq, id=new_seq_record_id)
new_seq_records.append(new_seq_record) # depends on [control=['for'], data=['seq_record']]
base_filename = os.path.splitext(aln)
new_filename = '{0}_modified{1}'.format(base_filename[0], base_filename[1])
SeqIO.write(new_seq_records, new_filename, 'fasta')
return new_filename |
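A standalone sketch of the same transformation with Biopython, usable outside the class context; the file names and taxon are illustrative:
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord

def tag_taxon(aln, taxon):
    # yields records whose id gains the bracketed taxon name
    for rec in SeqIO.parse(aln, "fasta"):
        yield SeqRecord(rec.seq, id="[{0}] {1}".format(taxon, rec.id))

SeqIO.write(tag_taxon("loci.fasta", "Homo sapiens"),
            "loci_modified.fasta", "fasta")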
def name(self):
"""
The entry’s base filename, relative to the scandir() path argument.
Returns:
str: name.
"""
name = self._name.rstrip('/')
if self._bytes_path:
name = fsencode(name)
return name | def function[name, parameter[self]]:
constant[
The entry’s base filename, relative to the scandir() path argument.
Returns:
str: name.
]
variable[name] assign[=] call[name[self]._name.rstrip, parameter[constant[/]]]
if name[self]._bytes_path begin[:]
variable[name] assign[=] call[name[fsencode], parameter[name[name]]]
return[name[name]] | keyword[def] identifier[name] ( identifier[self] ):
literal[string]
identifier[name] = identifier[self] . identifier[_name] . identifier[rstrip] ( literal[string] )
keyword[if] identifier[self] . identifier[_bytes_path] :
identifier[name] = identifier[fsencode] ( identifier[name] )
keyword[return] identifier[name] | def name(self):
"""
The entry’s base filename, relative to the scandir() path argument.
Returns:
str: name.
"""
name = self._name.rstrip('/')
if self._bytes_path:
name = fsencode(name) # depends on [control=['if'], data=[]]
return name |
def find_next_word_beginning(self, count=1, WORD=False):
"""
Return an index relative to the cursor position pointing to the start
of the next word. Return `None` if nothing was found.
"""
if count < 0:
return self.find_previous_word_beginning(count=-count, WORD=WORD)
regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE
iterator = regex.finditer(self.text_after_cursor)
try:
for i, match in enumerate(iterator):
# Take first match, unless it's the word on which we're right now.
if i == 0 and match.start(1) == 0:
count += 1
if i + 1 == count:
return match.start(1)
except StopIteration:
pass | def function[find_next_word_beginning, parameter[self, count, WORD]]:
constant[
Return an index relative to the cursor position pointing to the start
of the next word. Return `None` if nothing was found.
]
if compare[name[count] less[<] constant[0]] begin[:]
return[call[name[self].find_previous_word_beginning, parameter[]]]
variable[regex] assign[=] <ast.IfExp object at 0x7da18f00e980>
variable[iterator] assign[=] call[name[regex].finditer, parameter[name[self].text_after_cursor]]
<ast.Try object at 0x7da18f00f010> | keyword[def] identifier[find_next_word_beginning] ( identifier[self] , identifier[count] = literal[int] , identifier[WORD] = keyword[False] ):
literal[string]
keyword[if] identifier[count] < literal[int] :
keyword[return] identifier[self] . identifier[find_previous_word_beginning] ( identifier[count] =- identifier[count] , identifier[WORD] = identifier[WORD] )
identifier[regex] = identifier[_FIND_BIG_WORD_RE] keyword[if] identifier[WORD] keyword[else] identifier[_FIND_WORD_RE]
identifier[iterator] = identifier[regex] . identifier[finditer] ( identifier[self] . identifier[text_after_cursor] )
keyword[try] :
keyword[for] identifier[i] , identifier[match] keyword[in] identifier[enumerate] ( identifier[iterator] ):
keyword[if] identifier[i] == literal[int] keyword[and] identifier[match] . identifier[start] ( literal[int] )== literal[int] :
identifier[count] += literal[int]
keyword[if] identifier[i] + literal[int] == identifier[count] :
keyword[return] identifier[match] . identifier[start] ( literal[int] )
keyword[except] identifier[StopIteration] :
keyword[pass] | def find_next_word_beginning(self, count=1, WORD=False):
"""
Return an index relative to the cursor position pointing to the start
of the next word. Return `None` if nothing was found.
"""
if count < 0:
return self.find_previous_word_beginning(count=-count, WORD=WORD) # depends on [control=['if'], data=['count']]
regex = _FIND_BIG_WORD_RE if WORD else _FIND_WORD_RE
iterator = regex.finditer(self.text_after_cursor)
try:
for (i, match) in enumerate(iterator):
# Take first match, unless it's the word on which we're right now.
if i == 0 and match.start(1) == 0:
count += 1 # depends on [control=['if'], data=[]]
if i + 1 == count:
return match.start(1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]] |
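A self-contained demo of the scanning idea; the regex below is a plausible stand-in for `_FIND_WORD_RE` (the real pattern lives in the library), and `text_after_cursor` is faked as a plain string:
import re

_FIND_WORD_RE = re.compile(r'([a-zA-Z0-9_]+|[^a-zA-Z0-9_\s]+)')  # stand-in

text_after_cursor = "word, next target"
for i, match in enumerate(_FIND_WORD_RE.finditer(text_after_cursor)):
    print(i, match.start(1), match.group(1))
# match 0 starts at offset 0 (the word under the cursor), which is why
# the method bumps `count` before returning match.start(1).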
def run(self):
"""The main loop of the frontend. Here incoming messages from the service
are processed and forwarded to the corresponding callback methods."""
self.log.debug("Entered main loop")
while not self.shutdown:
# If no service is running slow down the main loop
if not self._pipe_service:
time.sleep(0.3)
self.update_status()
# While a service is running, check for incoming messages from that service
if self._pipe_service and self._pipe_service.poll(1):
try:
message = self._pipe_service.recv()
if isinstance(message, dict) and "band" in message:
# only dictionaries with 'band' entry are valid messages
try:
handler = getattr(self, "parse_band_" + message["band"])
except AttributeError:
handler = None
self.log.warning("Unknown band %s", str(message["band"]))
if handler:
# try:
handler(message)
# except Exception:
# print('Uh oh. What to do.')
else:
self.log.warning("Invalid message received %s", str(message))
except EOFError:
# Service has gone away
error_message = False
if self._service_status == CommonService.SERVICE_STATUS_END:
self.log.info("Service terminated")
elif self._service_status == CommonService.SERVICE_STATUS_ERROR:
error_message = "Service terminated with error code"
elif self._service_status in (
CommonService.SERVICE_STATUS_NONE,
CommonService.SERVICE_STATUS_NEW,
CommonService.SERVICE_STATUS_STARTING,
):
error_message = (
"Service may have died unexpectedly in "
+ "initialization (last known status: %s)"
% CommonService.human_readable_state.get(
self._service_status, self._service_status
)
)
else:
error_message = (
"Service may have died unexpectedly"
" (last known status: %s)"
% CommonService.human_readable_state.get(
self._service_status, self._service_status
)
)
if error_message:
self.log.error(error_message)
self._terminate_service()
if self.restart_service:
self.exponential_backoff()
else:
self.shutdown = True
if error_message:
raise workflows.Error(error_message)
with self.__lock:
if (
self._service is None
and self.restart_service
and self._service_factory
):
self.update_status(status_code=CommonService.SERVICE_STATUS_NEW)
self.switch_service()
# Check that the transport is alive
if not self._transport.is_connected():
self._terminate_service()
raise workflows.Error("Lost transport layer connection")
self.log.debug("Left main loop")
self.update_status(status_code=CommonService.SERVICE_STATUS_TEARDOWN)
self._terminate_service()
self.log.debug("Terminating.") | def function[run, parameter[self]]:
constant[The main loop of the frontend. Here incoming messages from the service
are processed and forwarded to the corresponding callback methods.]
call[name[self].log.debug, parameter[constant[Entered main loop]]]
while <ast.UnaryOp object at 0x7da18c4cc8e0> begin[:]
if <ast.UnaryOp object at 0x7da18c4ce7a0> begin[:]
call[name[time].sleep, parameter[constant[0.3]]]
call[name[self].update_status, parameter[]]
if <ast.BoolOp object at 0x7da18c4cec20> begin[:]
<ast.Try object at 0x7da18c4ccf40>
with name[self].__lock begin[:]
if <ast.BoolOp object at 0x7da18c4ce620> begin[:]
call[name[self].update_status, parameter[]]
call[name[self].switch_service, parameter[]]
if <ast.UnaryOp object at 0x7da18c4cf130> begin[:]
call[name[self]._terminate_service, parameter[]]
<ast.Raise object at 0x7da18c4cd1b0>
call[name[self].log.debug, parameter[constant[Left main loop]]]
call[name[self].update_status, parameter[]]
call[name[self]._terminate_service, parameter[]]
call[name[self].log.debug, parameter[constant[Terminating.]]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
keyword[while] keyword[not] identifier[self] . identifier[shutdown] :
keyword[if] keyword[not] identifier[self] . identifier[_pipe_service] :
identifier[time] . identifier[sleep] ( literal[int] )
identifier[self] . identifier[update_status] ()
keyword[if] identifier[self] . identifier[_pipe_service] keyword[and] identifier[self] . identifier[_pipe_service] . identifier[poll] ( literal[int] ):
keyword[try] :
identifier[message] = identifier[self] . identifier[_pipe_service] . identifier[recv] ()
keyword[if] identifier[isinstance] ( identifier[message] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[message] :
keyword[try] :
identifier[handler] = identifier[getattr] ( identifier[self] , literal[string] + identifier[message] [ literal[string] ])
keyword[except] identifier[AttributeError] :
identifier[handler] = keyword[None]
identifier[self] . identifier[log] . identifier[warning] ( literal[string] , identifier[str] ( identifier[message] [ literal[string] ]))
keyword[if] identifier[handler] :
identifier[handler] ( identifier[message] )
keyword[else] :
identifier[self] . identifier[log] . identifier[warning] ( literal[string] , identifier[str] ( identifier[message] ))
keyword[except] identifier[EOFError] :
identifier[error_message] = keyword[False]
keyword[if] identifier[self] . identifier[_service_status] == identifier[CommonService] . identifier[SERVICE_STATUS_END] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[elif] identifier[self] . identifier[_service_status] == identifier[CommonService] . identifier[SERVICE_STATUS_ERROR] :
identifier[error_message] = literal[string]
keyword[elif] identifier[self] . identifier[_service_status] keyword[in] (
identifier[CommonService] . identifier[SERVICE_STATUS_NONE] ,
identifier[CommonService] . identifier[SERVICE_STATUS_NEW] ,
identifier[CommonService] . identifier[SERVICE_STATUS_STARTING] ,
):
identifier[error_message] =(
literal[string]
+ literal[string]
% identifier[CommonService] . identifier[human_readable_state] . identifier[get] (
identifier[self] . identifier[_service_status] , identifier[self] . identifier[_service_status]
)
)
keyword[else] :
identifier[error_message] =(
literal[string]
literal[string]
% identifier[CommonService] . identifier[human_readable_state] . identifier[get] (
identifier[self] . identifier[_service_status] , identifier[self] . identifier[_service_status]
)
)
keyword[if] identifier[error_message] :
identifier[self] . identifier[log] . identifier[error] ( identifier[error_message] )
identifier[self] . identifier[_terminate_service] ()
keyword[if] identifier[self] . identifier[restart_service] :
identifier[self] . identifier[exponential_backoff] ()
keyword[else] :
identifier[self] . identifier[shutdown] = keyword[True]
keyword[if] identifier[error_message] :
keyword[raise] identifier[workflows] . identifier[Error] ( identifier[error_message] )
keyword[with] identifier[self] . identifier[__lock] :
keyword[if] (
identifier[self] . identifier[_service] keyword[is] keyword[None]
keyword[and] identifier[self] . identifier[restart_service]
keyword[and] identifier[self] . identifier[_service_factory]
):
identifier[self] . identifier[update_status] ( identifier[status_code] = identifier[CommonService] . identifier[SERVICE_STATUS_NEW] )
identifier[self] . identifier[switch_service] ()
keyword[if] keyword[not] identifier[self] . identifier[_transport] . identifier[is_connected] ():
identifier[self] . identifier[_terminate_service] ()
keyword[raise] identifier[workflows] . identifier[Error] ( literal[string] )
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[update_status] ( identifier[status_code] = identifier[CommonService] . identifier[SERVICE_STATUS_TEARDOWN] )
identifier[self] . identifier[_terminate_service] ()
identifier[self] . identifier[log] . identifier[debug] ( literal[string] ) | def run(self):
"""The main loop of the frontend. Here incoming messages from the service
are processed and forwarded to the corresponding callback methods."""
self.log.debug('Entered main loop')
while not self.shutdown:
# If no service is running slow down the main loop
if not self._pipe_service:
time.sleep(0.3) # depends on [control=['if'], data=[]]
self.update_status()
# While a service is running, check for incoming messages from that service
if self._pipe_service and self._pipe_service.poll(1):
try:
message = self._pipe_service.recv()
if isinstance(message, dict) and 'band' in message:
# only dictionaries with 'band' entry are valid messages
try:
handler = getattr(self, 'parse_band_' + message['band']) # depends on [control=['try'], data=[]]
except AttributeError:
handler = None
self.log.warning('Unknown band %s', str(message['band'])) # depends on [control=['except'], data=[]]
if handler:
# try:
handler(message) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# except Exception:
# print('Uh oh. What to do.')
self.log.warning('Invalid message received %s', str(message)) # depends on [control=['try'], data=[]]
except EOFError:
# Service has gone away
error_message = False
if self._service_status == CommonService.SERVICE_STATUS_END:
self.log.info('Service terminated') # depends on [control=['if'], data=[]]
elif self._service_status == CommonService.SERVICE_STATUS_ERROR:
error_message = 'Service terminated with error code' # depends on [control=['if'], data=[]]
elif self._service_status in (CommonService.SERVICE_STATUS_NONE, CommonService.SERVICE_STATUS_NEW, CommonService.SERVICE_STATUS_STARTING):
error_message = 'Service may have died unexpectedly in ' + 'initialization (last known status: %s)' % CommonService.human_readable_state.get(self._service_status, self._service_status) # depends on [control=['if'], data=[]]
else:
error_message = 'Service may have died unexpectedly (last known status: %s)' % CommonService.human_readable_state.get(self._service_status, self._service_status)
if error_message:
self.log.error(error_message) # depends on [control=['if'], data=[]]
self._terminate_service()
if self.restart_service:
self.exponential_backoff() # depends on [control=['if'], data=[]]
else:
self.shutdown = True
if error_message:
raise workflows.Error(error_message) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
with self.__lock:
if self._service is None and self.restart_service and self._service_factory:
self.update_status(status_code=CommonService.SERVICE_STATUS_NEW)
self.switch_service() # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
# Check that the transport is alive
if not self._transport.is_connected():
self._terminate_service()
raise workflows.Error('Lost transport layer connection') # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
self.log.debug('Left main loop')
self.update_status(status_code=CommonService.SERVICE_STATUS_TEARDOWN)
self._terminate_service()
self.log.debug('Terminating.') |
def deleteMask(self, signature):
""" Delete just the mask that matches the signature given."""
if signature in self.masklist:
self.masklist[signature] = None
else:
log.warning("No matching mask") | def function[deleteMask, parameter[self, signature]]:
constant[ Delete just the mask that matches the signature given.]
if compare[name[signature] in name[self].masklist] begin[:]
call[name[self].masklist][name[signature]] assign[=] constant[None] | keyword[def] identifier[deleteMask] ( identifier[self] , identifier[signature] ):
literal[string]
keyword[if] identifier[signature] keyword[in] identifier[self] . identifier[masklist] :
identifier[self] . identifier[masklist] [ identifier[signature] ]= keyword[None]
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] ) | def deleteMask(self, signature):
""" Delete just the mask that matches the signature given."""
if signature in self.masklist:
self.masklist[signature] = None # depends on [control=['if'], data=['signature']]
else:
log.warning('No matching mask') |
def _get_serializer(self, _type):
"""Gets a serializer for a particular type. For primitives, returns the
serializer from the module-level serializers.
For arrays and objects, uses the special _get_T_serializer methods to
build the encoders and decoders.
"""
if _type in _serializers: # _serializers is module level
return _serializers[_type]
# array and object are special types
elif _type == 'array':
return self._get_array_serializer()
elif _type == 'object':
return self._get_object_serializer()
raise ValueError('Unknown type: {}'.format(_type)) | def function[_get_serializer, parameter[self, _type]]:
constant[Gets a serializer for a particular type. For primitives, returns the
serializer from the module-level serializers.
For arrays and objects, uses the special _get_T_serializer methods to
build the encoders and decoders.
]
if compare[name[_type] in name[_serializers]] begin[:]
return[call[name[_serializers]][name[_type]]]
<ast.Raise object at 0x7da1b2555e70> | keyword[def] identifier[_get_serializer] ( identifier[self] , identifier[_type] ):
literal[string]
keyword[if] identifier[_type] keyword[in] identifier[_serializers] :
keyword[return] identifier[_serializers] [ identifier[_type] ]
keyword[elif] identifier[_type] == literal[string] :
keyword[return] identifier[self] . identifier[_get_array_serializer] ()
keyword[elif] identifier[_type] == literal[string] :
keyword[return] identifier[self] . identifier[_get_object_serializer] ()
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[_type] )) | def _get_serializer(self, _type):
"""Gets a serializer for a particular type. For primitives, returns the
serializer from the module-level serializers.
For arrays and objects, uses the special _get_T_serializer methods to
build the encoders and decoders.
"""
if _type in _serializers: # _serializers is module level
return _serializers[_type] # depends on [control=['if'], data=['_type', '_serializers']]
# array and object are special types
elif _type == 'array':
return self._get_array_serializer() # depends on [control=['if'], data=[]]
elif _type == 'object':
return self._get_object_serializer() # depends on [control=['if'], data=[]]
raise ValueError('Unknown type: {}'.format(_type)) |
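A runnable sketch of the dispatch pattern _get_serializer implements; the module-level _serializers table is not part of this row, so the (encode, decode) pairs below are illustrative assumptions:

import json

# hypothetical primitive registry keyed by type name: (encode, decode) pairs
_serializers = {
    'int': (str, int),
    'str': (str, str),
}

def get_serializer(_type):
    # primitives come from the registry; compound types are built specially
    if _type in _serializers:
        return _serializers[_type]
    if _type == 'object':
        return (json.dumps, json.loads)
    raise ValueError('Unknown type: {}'.format(_type))

encode, decode = get_serializer('int')
assert decode(encode(42)) == 42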
def recover_constants(py_source,
                      replacements):  # now has O(n^2) complexity; improve to O(n)
'''Converts identifiers representing Js constants to the PyJs constants
PyJsNumberConst_1_ which has the true value of 5 will be converted to PyJsNumber(5)'''
for identifier, value in replacements.iteritems():
if identifier.startswith('PyJsConstantRegExp'):
py_source = py_source.replace(identifier,
'JsRegExp(%s)' % repr(value))
elif identifier.startswith('PyJsConstantString'):
py_source = py_source.replace(
identifier, 'Js(u%s)' % unify_string_literals(value))
else:
py_source = py_source.replace(identifier, 'Js(%s)' % value)
return py_source | def function[recover_constants, parameter[py_source, replacements]]:
constant[Converts identifiers representing Js constants to the PyJs constants
PyJsNumberConst_1_ which has the true value of 5 will be converted to PyJsNumber(5)]
for taget[tuple[[<ast.Name object at 0x7da20c795690>, <ast.Name object at 0x7da20c794460>]]] in starred[call[name[replacements].iteritems, parameter[]]] begin[:]
if call[name[identifier].startswith, parameter[constant[PyJsConstantRegExp]]] begin[:]
variable[py_source] assign[=] call[name[py_source].replace, parameter[name[identifier], binary_operation[constant[JsRegExp(%s)] <ast.Mod object at 0x7da2590d6920> call[name[repr], parameter[name[value]]]]]]
return[name[py_source]] | keyword[def] identifier[recover_constants] ( identifier[py_source] ,
identifier[replacements] ):
literal[string]
keyword[for] identifier[identifier] , identifier[value] keyword[in] identifier[replacements] . identifier[iteritems] ():
keyword[if] identifier[identifier] . identifier[startswith] ( literal[string] ):
identifier[py_source] = identifier[py_source] . identifier[replace] ( identifier[identifier] ,
literal[string] % identifier[repr] ( identifier[value] ))
keyword[elif] identifier[identifier] . identifier[startswith] ( literal[string] ):
identifier[py_source] = identifier[py_source] . identifier[replace] (
identifier[identifier] , literal[string] % identifier[unify_string_literals] ( identifier[value] ))
keyword[else] :
identifier[py_source] = identifier[py_source] . identifier[replace] ( identifier[identifier] , literal[string] % identifier[value] )
keyword[return] identifier[py_source] | def recover_constants(py_source, replacements): #now has n^2 complexity. improve to n
'Converts identifiers representing Js constants to the PyJs constants\n PyJsNumberConst_1_ which has the true value of 5 will be converted to PyJsNumber(5)'
for (identifier, value) in replacements.iteritems():
if identifier.startswith('PyJsConstantRegExp'):
py_source = py_source.replace(identifier, 'JsRegExp(%s)' % repr(value)) # depends on [control=['if'], data=[]]
elif identifier.startswith('PyJsConstantString'):
py_source = py_source.replace(identifier, 'Js(u%s)' % unify_string_literals(value)) # depends on [control=['if'], data=[]]
else:
py_source = py_source.replace(identifier, 'Js(%s)' % value) # depends on [control=['for'], data=[]]
return py_source |
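A small worked example of the substitution recover_constants performs; unify_string_literals is stubbed out because its definition is not part of this row, and Python 3 dict iteration is used so the sketch runs as-is:

def unify_string_literals(value):
    return value  # stand-in for the real quote-normalizing helper

py_source = 'x = PyJsConstantString_1_'
replacements = {'PyJsConstantString_1_': "'hello'"}
for identifier, value in replacements.items():
    if identifier.startswith('PyJsConstantString'):
        py_source = py_source.replace(
            identifier, 'Js(u%s)' % unify_string_literals(value))
print(py_source)  # x = Js(u'hello')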
def get_client_by_appid(self, authorizer_appid):
"""
        Get a Client object by authorizer_appid.
        :param authorizer_appid: appid of the authorized official account
"""
access_token_key = '{0}_access_token'.format(authorizer_appid)
refresh_token_key = '{0}_refresh_token'.format(authorizer_appid)
access_token = self.session.get(access_token_key)
refresh_token = self.session.get(refresh_token_key)
assert refresh_token
if not access_token:
ret = self.refresh_authorizer_token(
authorizer_appid,
refresh_token
)
access_token = ret['authorizer_access_token']
refresh_token = ret['authorizer_refresh_token']
access_token_key = '{0}_access_token'.format(authorizer_appid)
expires_in = 7200
if 'expires_in' in ret:
expires_in = ret['expires_in']
self.session.set(access_token_key, access_token, expires_in)
return WeChatComponentClient(
authorizer_appid,
self,
session=self.session
) | def function[get_client_by_appid, parameter[self, authorizer_appid]]:
constant[
    Get a Client object by authorizer_appid.
    :param authorizer_appid: appid of the authorized official account
]
variable[access_token_key] assign[=] call[constant[{0}_access_token].format, parameter[name[authorizer_appid]]]
variable[refresh_token_key] assign[=] call[constant[{0}_refresh_token].format, parameter[name[authorizer_appid]]]
variable[access_token] assign[=] call[name[self].session.get, parameter[name[access_token_key]]]
variable[refresh_token] assign[=] call[name[self].session.get, parameter[name[refresh_token_key]]]
assert[name[refresh_token]]
if <ast.UnaryOp object at 0x7da1b217ee00> begin[:]
variable[ret] assign[=] call[name[self].refresh_authorizer_token, parameter[name[authorizer_appid], name[refresh_token]]]
variable[access_token] assign[=] call[name[ret]][constant[authorizer_access_token]]
variable[refresh_token] assign[=] call[name[ret]][constant[authorizer_refresh_token]]
variable[access_token_key] assign[=] call[constant[{0}_access_token].format, parameter[name[authorizer_appid]]]
variable[expires_in] assign[=] constant[7200]
if compare[constant[expires_in] in name[ret]] begin[:]
variable[expires_in] assign[=] call[name[ret]][constant[expires_in]]
call[name[self].session.set, parameter[name[access_token_key], name[access_token], name[expires_in]]]
return[call[name[WeChatComponentClient], parameter[name[authorizer_appid], name[self]]]] | keyword[def] identifier[get_client_by_appid] ( identifier[self] , identifier[authorizer_appid] ):
literal[string]
identifier[access_token_key] = literal[string] . identifier[format] ( identifier[authorizer_appid] )
identifier[refresh_token_key] = literal[string] . identifier[format] ( identifier[authorizer_appid] )
identifier[access_token] = identifier[self] . identifier[session] . identifier[get] ( identifier[access_token_key] )
identifier[refresh_token] = identifier[self] . identifier[session] . identifier[get] ( identifier[refresh_token_key] )
keyword[assert] identifier[refresh_token]
keyword[if] keyword[not] identifier[access_token] :
identifier[ret] = identifier[self] . identifier[refresh_authorizer_token] (
identifier[authorizer_appid] ,
identifier[refresh_token]
)
identifier[access_token] = identifier[ret] [ literal[string] ]
identifier[refresh_token] = identifier[ret] [ literal[string] ]
identifier[access_token_key] = literal[string] . identifier[format] ( identifier[authorizer_appid] )
identifier[expires_in] = literal[int]
keyword[if] literal[string] keyword[in] identifier[ret] :
identifier[expires_in] = identifier[ret] [ literal[string] ]
identifier[self] . identifier[session] . identifier[set] ( identifier[access_token_key] , identifier[access_token] , identifier[expires_in] )
keyword[return] identifier[WeChatComponentClient] (
identifier[authorizer_appid] ,
identifier[self] ,
identifier[session] = identifier[self] . identifier[session]
) | def get_client_by_appid(self, authorizer_appid):
"""
    Get a Client object by authorizer_appid.
    :param authorizer_appid: appid of the authorized official account
"""
access_token_key = '{0}_access_token'.format(authorizer_appid)
refresh_token_key = '{0}_refresh_token'.format(authorizer_appid)
access_token = self.session.get(access_token_key)
refresh_token = self.session.get(refresh_token_key)
assert refresh_token
if not access_token:
ret = self.refresh_authorizer_token(authorizer_appid, refresh_token)
access_token = ret['authorizer_access_token']
refresh_token = ret['authorizer_refresh_token']
access_token_key = '{0}_access_token'.format(authorizer_appid)
expires_in = 7200
if 'expires_in' in ret:
expires_in = ret['expires_in'] # depends on [control=['if'], data=['ret']]
self.session.set(access_token_key, access_token, expires_in) # depends on [control=['if'], data=[]]
return WeChatComponentClient(authorizer_appid, self, session=self.session) |
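A hypothetical call site; the surrounding class resembles wechatpy's WeChatComponent, but the constructor arguments and appid below are placeholders, not verified values:

component = WeChatComponent(
    'component_appid', 'component_appsecret',
    'component_token', 'encoding_aes_key')
client = component.get_client_by_appid('wx1234567890abcdef')
# the returned client shares the component's session store, so refreshed
# access tokens stay cached for roughly expires_in seconds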
def run_apidoc(_):
"""This method is required by the setup method below."""
import os
dirname = os.path.dirname(__file__)
ignore_paths = [os.path.join(dirname, '../../aaf2/model'),]
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/apidoc.py
argv = [
'--force',
'--no-toc',
'--separate',
'--module-first',
'--output-dir',
os.path.join(dirname, 'api'),
os.path.join(dirname, '../../aaf2'),
] + ignore_paths
from sphinx.ext import apidoc
apidoc.main(argv) | def function[run_apidoc, parameter[_]]:
constant[This method is required by the setup method below.]
import module[os]
variable[dirname] assign[=] call[name[os].path.dirname, parameter[name[__file__]]]
variable[ignore_paths] assign[=] list[[<ast.Call object at 0x7da1b16328c0>]]
variable[argv] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1632710>, <ast.Constant object at 0x7da1b1630d90>, <ast.Constant object at 0x7da1b15f1990>, <ast.Constant object at 0x7da1b15f1a50>, <ast.Constant object at 0x7da1b15f1bd0>, <ast.Call object at 0x7da1b15f2e90>, <ast.Call object at 0x7da1b15f2530>]] + name[ignore_paths]]
from relative_module[sphinx.ext] import module[apidoc]
call[name[apidoc].main, parameter[name[argv]]] | keyword[def] identifier[run_apidoc] ( identifier[_] ):
literal[string]
keyword[import] identifier[os]
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )
identifier[ignore_paths] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , literal[string] ),]
identifier[argv] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , literal[string] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , literal[string] ),
]+ identifier[ignore_paths]
keyword[from] identifier[sphinx] . identifier[ext] keyword[import] identifier[apidoc]
identifier[apidoc] . identifier[main] ( identifier[argv] ) | def run_apidoc(_):
"""This method is required by the setup method below."""
import os
dirname = os.path.dirname(__file__)
ignore_paths = [os.path.join(dirname, '../../aaf2/model')]
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/apidoc.py
argv = ['--force', '--no-toc', '--separate', '--module-first', '--output-dir', os.path.join(dirname, 'api'), os.path.join(dirname, '../../aaf2')] + ignore_paths
from sphinx.ext import apidoc
apidoc.main(argv) |
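The docstring notes this function is required by a setup method; the conventional conf.py wiring (assumed here, since that setup function is not shown) connects it to Sphinx's builder-inited event:

def setup(app):
    # regenerate the API stubs every time the Sphinx builder starts
    app.connect('builder-inited', run_apidoc)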
def _follow_next(self, url):
"""Follow the 'next' link on paginated results."""
response = self._json(self._get(url), 200)
data = response['data']
next_url = self._get_attribute(response, 'links', 'next')
while next_url is not None:
response = self._json(self._get(next_url), 200)
data.extend(response['data'])
next_url = self._get_attribute(response, 'links', 'next')
return data | def function[_follow_next, parameter[self, url]]:
constant[Follow the 'next' link on paginated results.]
variable[response] assign[=] call[name[self]._json, parameter[call[name[self]._get, parameter[name[url]]], constant[200]]]
variable[data] assign[=] call[name[response]][constant[data]]
variable[next_url] assign[=] call[name[self]._get_attribute, parameter[name[response], constant[links], constant[next]]]
while compare[name[next_url] is_not constant[None]] begin[:]
variable[response] assign[=] call[name[self]._json, parameter[call[name[self]._get, parameter[name[next_url]]], constant[200]]]
call[name[data].extend, parameter[call[name[response]][constant[data]]]]
variable[next_url] assign[=] call[name[self]._get_attribute, parameter[name[response], constant[links], constant[next]]]
return[name[data]] | keyword[def] identifier[_follow_next] ( identifier[self] , identifier[url] ):
literal[string]
identifier[response] = identifier[self] . identifier[_json] ( identifier[self] . identifier[_get] ( identifier[url] ), literal[int] )
identifier[data] = identifier[response] [ literal[string] ]
identifier[next_url] = identifier[self] . identifier[_get_attribute] ( identifier[response] , literal[string] , literal[string] )
keyword[while] identifier[next_url] keyword[is] keyword[not] keyword[None] :
identifier[response] = identifier[self] . identifier[_json] ( identifier[self] . identifier[_get] ( identifier[next_url] ), literal[int] )
identifier[data] . identifier[extend] ( identifier[response] [ literal[string] ])
identifier[next_url] = identifier[self] . identifier[_get_attribute] ( identifier[response] , literal[string] , literal[string] )
keyword[return] identifier[data] | def _follow_next(self, url):
"""Follow the 'next' link on paginated results."""
response = self._json(self._get(url), 200)
data = response['data']
next_url = self._get_attribute(response, 'links', 'next')
while next_url is not None:
response = self._json(self._get(next_url), 200)
data.extend(response['data'])
next_url = self._get_attribute(response, 'links', 'next') # depends on [control=['while'], data=['next_url']]
return data |
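A self-contained sketch of the same follow-the-next-link loop, with an in-memory dict standing in for the HTTP round trips made through self._get/self._json:

pages = {
    '/items?page=1': {'data': [1, 2], 'links': {'next': '/items?page=2'}},
    '/items?page=2': {'data': [3], 'links': {}},
}

def follow_next(url):
    response = pages[url]
    data = list(response['data'])
    next_url = response['links'].get('next')
    while next_url is not None:
        response = pages[next_url]
        data.extend(response['data'])
        next_url = response['links'].get('next')
    return data

assert follow_next('/items?page=1') == [1, 2, 3]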
def add(self, document=None, watermark=None, underneath=False, output=None, suffix='watermarked', method='pdfrw'):
"""
Add a watermark file to an existing PDF document.
        Rotate and upscale watermark file as needed to fit existing PDF document. Watermark can be overlaid or
placed underneath.
:param document: str
PDF document full path
:param watermark: str
Watermark PDF full path
:param underneath: bool
Place watermark either under or over existing PDF document
:param output: str
Output file path
:param suffix: str
Suffix to append to existing PDF document file name
:param method: str
PDF library to be used for watermark adding
:return: str
Watermarked PDF Document full path
"""
if self.use_receipt:
self.receipt.add('WM Placement', 'Overlay')
if not watermark:
watermark = self.watermark
if not document:
document = self.document
self.document = str(WatermarkAdd(document, watermark, output=output, underneath=underneath,
tempdir=self.tempdir, suffix=suffix, method=method))
if self.use_receipt:
self.receipt.add('Watermarked PDF', os.path.basename(self.document))
if self.open_file:
open_window(self.document)
return self.document | def function[add, parameter[self, document, watermark, underneath, output, suffix, method]]:
constant[
Add a watermark file to an existing PDF document.
Rotate and upscale watermark file as needed to fit existing PDF document. Watermark can be overlayed or
placed underneath.
:param document: str
PDF document full path
:param watermark: str
Watermark PDF full path
:param underneath: bool
Place watermark either under or over existing PDF document
:param output: str
Output file path
:param suffix: str
Suffix to append to existing PDF document file name
:param method: str
PDF library to be used for watermark adding
:return: str
Watermarked PDF Document full path
]
if name[self].use_receipt begin[:]
call[name[self].receipt.add, parameter[constant[WM Placement], constant[Overlay]]]
if <ast.UnaryOp object at 0x7da2043472b0> begin[:]
variable[watermark] assign[=] name[self].watermark
if <ast.UnaryOp object at 0x7da2043466e0> begin[:]
variable[document] assign[=] name[self].document
name[self].document assign[=] call[name[str], parameter[call[name[WatermarkAdd], parameter[name[document], name[watermark]]]]]
if name[self].use_receipt begin[:]
call[name[self].receipt.add, parameter[constant[Watermarked PDF], call[name[os].path.basename, parameter[name[self].document]]]]
if name[self].open_file begin[:]
call[name[open_window], parameter[name[self].document]]
return[name[self].document] | keyword[def] identifier[add] ( identifier[self] , identifier[document] = keyword[None] , identifier[watermark] = keyword[None] , identifier[underneath] = keyword[False] , identifier[output] = keyword[None] , identifier[suffix] = literal[string] , identifier[method] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[use_receipt] :
identifier[self] . identifier[receipt] . identifier[add] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[watermark] :
identifier[watermark] = identifier[self] . identifier[watermark]
keyword[if] keyword[not] identifier[document] :
identifier[document] = identifier[self] . identifier[document]
identifier[self] . identifier[document] = identifier[str] ( identifier[WatermarkAdd] ( identifier[document] , identifier[watermark] , identifier[output] = identifier[output] , identifier[underneath] = identifier[underneath] ,
identifier[tempdir] = identifier[self] . identifier[tempdir] , identifier[suffix] = identifier[suffix] , identifier[method] = identifier[method] ))
keyword[if] identifier[self] . identifier[use_receipt] :
identifier[self] . identifier[receipt] . identifier[add] ( literal[string] , identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[document] ))
keyword[if] identifier[self] . identifier[open_file] :
identifier[open_window] ( identifier[self] . identifier[document] )
keyword[return] identifier[self] . identifier[document] | def add(self, document=None, watermark=None, underneath=False, output=None, suffix='watermarked', method='pdfrw'):
"""
Add a watermark file to an existing PDF document.
    Rotate and upscale watermark file as needed to fit existing PDF document. Watermark can be overlaid or
placed underneath.
:param document: str
PDF document full path
:param watermark: str
Watermark PDF full path
:param underneath: bool
Place watermark either under or over existing PDF document
:param output: str
Output file path
:param suffix: str
Suffix to append to existing PDF document file name
:param method: str
PDF library to be used for watermark adding
:return: str
Watermarked PDF Document full path
"""
if self.use_receipt:
self.receipt.add('WM Placement', 'Overlay') # depends on [control=['if'], data=[]]
if not watermark:
watermark = self.watermark # depends on [control=['if'], data=[]]
if not document:
document = self.document # depends on [control=['if'], data=[]]
self.document = str(WatermarkAdd(document, watermark, output=output, underneath=underneath, tempdir=self.tempdir, suffix=suffix, method=method))
if self.use_receipt:
self.receipt.add('Watermarked PDF', os.path.basename(self.document)) # depends on [control=['if'], data=[]]
if self.open_file:
open_window(self.document) # depends on [control=['if'], data=[]]
return self.document |
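A hypothetical call site for the method above; the owning Watermark class, its constructor, and the file names are assumptions made purely for illustration:

wm = Watermark('report.pdf')  # assumed constructor taking the source PDF
out = wm.add(watermark='stamp.pdf', underneath=True, suffix='stamped')
# out -> path to report_stamped.pdf; opened in a window when open_file is set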
def _warcprox_opts(self, args):
'''
Takes args as produced by the argument parser built by
_build_arg_parser and builds warcprox arguments object suitable to pass
to warcprox.main.init_controller. Copies some arguments, renames some,
populates some with defaults appropriate for brozzler-easy, etc.
'''
warcprox_opts = warcprox.Options()
warcprox_opts.address = 'localhost'
# let the OS choose an available port; discover it later using
# sock.getsockname()[1]
warcprox_opts.port = 0
warcprox_opts.cacert = args.cacert
warcprox_opts.certs_dir = args.certs_dir
warcprox_opts.directory = args.warcs_dir
warcprox_opts.gzip = True
warcprox_opts.prefix = 'brozzler'
        warcprox_opts.size = 1000 * 1000 * 1000
warcprox_opts.rollover_idle_time = 3 * 60
warcprox_opts.digest_algorithm = 'sha1'
warcprox_opts.base32 = True
warcprox_opts.stats_db_file = None
warcprox_opts.playback_port = None
warcprox_opts.playback_index_db_file = None
warcprox_opts.rethinkdb_big_table_url = (
'rethinkdb://%s/%s/captures' % (
args.rethinkdb_servers, args.rethinkdb_db))
warcprox_opts.queue_size = 500
warcprox_opts.max_threads = None
warcprox_opts.profile = False
warcprox_opts.onion_tor_socks_proxy = args.onion_tor_socks_proxy
return warcprox_opts | def function[_warcprox_opts, parameter[self, args]]:
constant[
Takes args as produced by the argument parser built by
_build_arg_parser and builds warcprox arguments object suitable to pass
to warcprox.main.init_controller. Copies some arguments, renames some,
populates some with defaults appropriate for brozzler-easy, etc.
]
variable[warcprox_opts] assign[=] call[name[warcprox].Options, parameter[]]
name[warcprox_opts].address assign[=] constant[localhost]
name[warcprox_opts].port assign[=] constant[0]
name[warcprox_opts].cacert assign[=] name[args].cacert
name[warcprox_opts].certs_dir assign[=] name[args].certs_dir
name[warcprox_opts].directory assign[=] name[args].warcs_dir
name[warcprox_opts].gzip assign[=] constant[True]
name[warcprox_opts].prefix assign[=] constant[brozzler]
name[warcprox_opts].size assign[=] binary_operation[binary_operation[constant[1000] * constant[1000]] * constant[1000]]
name[warcprox_opts].rollover_idle_time assign[=] binary_operation[constant[3] * constant[60]]
name[warcprox_opts].digest_algorithm assign[=] constant[sha1]
name[warcprox_opts].base32 assign[=] constant[True]
name[warcprox_opts].stats_db_file assign[=] constant[None]
name[warcprox_opts].playback_port assign[=] constant[None]
name[warcprox_opts].playback_index_db_file assign[=] constant[None]
name[warcprox_opts].rethinkdb_big_table_url assign[=] binary_operation[constant[rethinkdb://%s/%s/captures] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b20bef20>, <ast.Attribute object at 0x7da1b20be6b0>]]]
name[warcprox_opts].queue_size assign[=] constant[500]
name[warcprox_opts].max_threads assign[=] constant[None]
name[warcprox_opts].profile assign[=] constant[False]
name[warcprox_opts].onion_tor_socks_proxy assign[=] name[args].onion_tor_socks_proxy
return[name[warcprox_opts]] | keyword[def] identifier[_warcprox_opts] ( identifier[self] , identifier[args] ):
literal[string]
identifier[warcprox_opts] = identifier[warcprox] . identifier[Options] ()
identifier[warcprox_opts] . identifier[address] = literal[string]
identifier[warcprox_opts] . identifier[port] = literal[int]
identifier[warcprox_opts] . identifier[cacert] = identifier[args] . identifier[cacert]
identifier[warcprox_opts] . identifier[certs_dir] = identifier[args] . identifier[certs_dir]
identifier[warcprox_opts] . identifier[directory] = identifier[args] . identifier[warcs_dir]
identifier[warcprox_opts] . identifier[gzip] = keyword[True]
identifier[warcprox_opts] . identifier[prefix] = literal[string]
identifier[warcprox_opts] . identifier[size] = literal[int] * literal[int] * literal[int]
identifier[warcprox_opts] . identifier[rollover_idle_time] = literal[int] * literal[int]
identifier[warcprox_opts] . identifier[digest_algorithm] = literal[string]
identifier[warcprox_opts] . identifier[base32] = keyword[True]
identifier[warcprox_opts] . identifier[stats_db_file] = keyword[None]
identifier[warcprox_opts] . identifier[playback_port] = keyword[None]
identifier[warcprox_opts] . identifier[playback_index_db_file] = keyword[None]
identifier[warcprox_opts] . identifier[rethinkdb_big_table_url] =(
literal[string] %(
identifier[args] . identifier[rethinkdb_servers] , identifier[args] . identifier[rethinkdb_db] ))
identifier[warcprox_opts] . identifier[queue_size] = literal[int]
identifier[warcprox_opts] . identifier[max_threads] = keyword[None]
identifier[warcprox_opts] . identifier[profile] = keyword[False]
identifier[warcprox_opts] . identifier[onion_tor_socks_proxy] = identifier[args] . identifier[onion_tor_socks_proxy]
keyword[return] identifier[warcprox_opts] | def _warcprox_opts(self, args):
"""
Takes args as produced by the argument parser built by
_build_arg_parser and builds warcprox arguments object suitable to pass
to warcprox.main.init_controller. Copies some arguments, renames some,
populates some with defaults appropriate for brozzler-easy, etc.
"""
warcprox_opts = warcprox.Options()
warcprox_opts.address = 'localhost'
# let the OS choose an available port; discover it later using
# sock.getsockname()[1]
warcprox_opts.port = 0
warcprox_opts.cacert = args.cacert
warcprox_opts.certs_dir = args.certs_dir
warcprox_opts.directory = args.warcs_dir
warcprox_opts.gzip = True
warcprox_opts.prefix = 'brozzler'
warcprox_opts.size = 1000 * 1000 * 1000
warcprox_opts.rollover_idle_time = 3 * 60
warcprox_opts.digest_algorithm = 'sha1'
warcprox_opts.base32 = True
warcprox_opts.stats_db_file = None
warcprox_opts.playback_port = None
warcprox_opts.playback_index_db_file = None
warcprox_opts.rethinkdb_big_table_url = 'rethinkdb://%s/%s/captures' % (args.rethinkdb_servers, args.rethinkdb_db)
warcprox_opts.queue_size = 500
warcprox_opts.max_threads = None
warcprox_opts.profile = False
warcprox_opts.onion_tor_socks_proxy = args.onion_tor_socks_proxy
return warcprox_opts |
def write(self, version):
# type: (str) -> None
""" Write the project version to .py file.
This will regex search in the file for a
``__version__ = VERSION_STRING`` and substitute the version string
for the new version.
"""
with open(self.version_file) as fp:
content = fp.read()
ver_statement = "__version__ = '{}'".format(version)
new_content = RE_PY_VERSION.sub(ver_statement, content)
fs.write_file(self.version_file, new_content) | def function[write, parameter[self, version]]:
constant[ Write the project version to .py file.
This will regex search in the file for a
``__version__ = VERSION_STRING`` and substitute the version string
for the new version.
]
with call[name[open], parameter[name[self].version_file]] begin[:]
variable[content] assign[=] call[name[fp].read, parameter[]]
variable[ver_statement] assign[=] call[constant[__version__ = '{}'].format, parameter[name[version]]]
variable[new_content] assign[=] call[name[RE_PY_VERSION].sub, parameter[name[ver_statement], name[content]]]
call[name[fs].write_file, parameter[name[self].version_file, name[new_content]]] | keyword[def] identifier[write] ( identifier[self] , identifier[version] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[version_file] ) keyword[as] identifier[fp] :
identifier[content] = identifier[fp] . identifier[read] ()
identifier[ver_statement] = literal[string] . identifier[format] ( identifier[version] )
identifier[new_content] = identifier[RE_PY_VERSION] . identifier[sub] ( identifier[ver_statement] , identifier[content] )
identifier[fs] . identifier[write_file] ( identifier[self] . identifier[version_file] , identifier[new_content] ) | def write(self, version):
# type: (str) -> None
' Write the project version to .py file.\n\n This will regex search in the file for a\n ``__version__ = VERSION_STRING`` and substitute the version string\n for the new version.\n '
with open(self.version_file) as fp:
content = fp.read() # depends on [control=['with'], data=['fp']]
ver_statement = "__version__ = '{}'".format(version)
new_content = RE_PY_VERSION.sub(ver_statement, content)
fs.write_file(self.version_file, new_content) |
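RE_PY_VERSION is defined outside this row; the pattern below is an assumed minimal equivalent, shown with the same sub() call so the substitution can be exercised standalone:

import re

RE_PY_VERSION = re.compile(r"__version__\s*=\s*'[^']*'")  # assumed pattern

content = "__version__ = '0.1.0'\n"
print(RE_PY_VERSION.sub("__version__ = '0.2.0'", content))
# __version__ = '0.2.0'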
def rtt_get_num_down_buffers(self):
"""After starting RTT, get the current number of down buffers.
Args:
self (JLink): the ``JLink`` instance
Returns:
The number of configured down buffers on the target.
Raises:
JLinkRTTException if the underlying JLINK_RTTERMINAL_Control call fails.
"""
cmd = enums.JLinkRTTCommand.GETNUMBUF
dir = ctypes.c_int(enums.JLinkRTTDirection.DOWN)
return self.rtt_control(cmd, dir) | def function[rtt_get_num_down_buffers, parameter[self]]:
constant[After starting RTT, get the current number of down buffers.
Args:
self (JLink): the ``JLink`` instance
Returns:
The number of configured down buffers on the target.
Raises:
JLinkRTTException if the underlying JLINK_RTTERMINAL_Control call fails.
]
variable[cmd] assign[=] name[enums].JLinkRTTCommand.GETNUMBUF
variable[dir] assign[=] call[name[ctypes].c_int, parameter[name[enums].JLinkRTTDirection.DOWN]]
return[call[name[self].rtt_control, parameter[name[cmd], name[dir]]]] | keyword[def] identifier[rtt_get_num_down_buffers] ( identifier[self] ):
literal[string]
identifier[cmd] = identifier[enums] . identifier[JLinkRTTCommand] . identifier[GETNUMBUF]
identifier[dir] = identifier[ctypes] . identifier[c_int] ( identifier[enums] . identifier[JLinkRTTDirection] . identifier[DOWN] )
keyword[return] identifier[self] . identifier[rtt_control] ( identifier[cmd] , identifier[dir] ) | def rtt_get_num_down_buffers(self):
"""After starting RTT, get the current number of down buffers.
Args:
self (JLink): the ``JLink`` instance
Returns:
The number of configured down buffers on the target.
Raises:
JLinkRTTException if the underlying JLINK_RTTERMINAL_Control call fails.
"""
cmd = enums.JLinkRTTCommand.GETNUMBUF
dir = ctypes.c_int(enums.JLinkRTTDirection.DOWN)
return self.rtt_control(cmd, dir) |
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = get_object_or_404(self.get_queryset(request),
pk=unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(str(obj.identity)),
# this is the change for our override;
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
ctx = self.admin_site.each_context(request)
context = dict(ctx,
                       title='Change history: %s' % force_text(obj),
action_list=action_list,
module_name=capfirst(
force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context) | def function[history_view, parameter[self, request, object_id, extra_context]]:
constant[The 'history' admin view for this model.]
from relative_module[django.contrib.admin.models] import module[LogEntry]
variable[model] assign[=] name[self].model
variable[obj] assign[=] call[name[get_object_or_404], parameter[call[name[self].get_queryset, parameter[name[request]]]]]
if <ast.UnaryOp object at 0x7da1b10838b0> begin[:]
<ast.Raise object at 0x7da1b1080e80>
variable[opts] assign[=] name[model]._meta
variable[app_label] assign[=] name[opts].app_label
variable[action_list] assign[=] call[call[call[name[LogEntry].objects.filter, parameter[]].select_related, parameter[]].order_by, parameter[constant[action_time]]]
variable[ctx] assign[=] call[name[self].admin_site.each_context, parameter[name[request]]]
variable[context] assign[=] call[name[dict], parameter[name[ctx]]]
call[name[context].update, parameter[<ast.BoolOp object at 0x7da1b1080190>]]
return[call[name[TemplateResponse], parameter[name[request], <ast.BoolOp object at 0x7da1b1081ea0>, name[context]]]] | keyword[def] identifier[history_view] ( identifier[self] , identifier[request] , identifier[object_id] , identifier[extra_context] = keyword[None] ):
literal[string]
keyword[from] identifier[django] . identifier[contrib] . identifier[admin] . identifier[models] keyword[import] identifier[LogEntry]
identifier[model] = identifier[self] . identifier[model]
identifier[obj] = identifier[get_object_or_404] ( identifier[self] . identifier[get_queryset] ( identifier[request] ),
identifier[pk] = identifier[unquote] ( identifier[object_id] ))
keyword[if] keyword[not] identifier[self] . identifier[has_change_permission] ( identifier[request] , identifier[obj] ):
keyword[raise] identifier[PermissionDenied]
identifier[opts] = identifier[model] . identifier[_meta]
identifier[app_label] = identifier[opts] . identifier[app_label]
identifier[action_list] = identifier[LogEntry] . identifier[objects] . identifier[filter] (
identifier[object_id] = identifier[unquote] ( identifier[str] ( identifier[obj] . identifier[identity] )),
identifier[content_type] = identifier[get_content_type_for_model] ( identifier[model] )
). identifier[select_related] (). identifier[order_by] ( literal[string] )
identifier[ctx] = identifier[self] . identifier[admin_site] . identifier[each_context] ( identifier[request] )
identifier[context] = identifier[dict] ( identifier[ctx] ,
identifier[title] =( literal[string] )% identifier[force_text] ( identifier[obj] ),
identifier[action_list] = identifier[action_list] ,
identifier[module_name] = identifier[capfirst] (
identifier[force_text] ( identifier[opts] . identifier[verbose_name_plural] )),
identifier[object] = identifier[obj] ,
identifier[opts] = identifier[opts] ,
identifier[preserved_filters] = identifier[self] . identifier[get_preserved_filters] ( identifier[request] ),
)
identifier[context] . identifier[update] ( identifier[extra_context] keyword[or] {})
keyword[return] identifier[TemplateResponse] ( identifier[request] , identifier[self] . identifier[object_history_template] keyword[or] [
literal[string] %( identifier[app_label] , identifier[opts] . identifier[model_name] ),
literal[string] % identifier[app_label] ,
literal[string]
], identifier[context] ) | def history_view(self, request, object_id, extra_context=None):
"""The 'history' admin view for this model."""
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = get_object_or_404(self.get_queryset(request), pk=unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied # depends on [control=['if'], data=[]]
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
# this is the change for our override;
action_list = LogEntry.objects.filter(object_id=unquote(str(obj.identity)), content_type=get_content_type_for_model(model)).select_related().order_by('action_time')
ctx = self.admin_site.each_context(request)
context = dict(ctx, title='Change history: %s' % force_text(obj), action_list=action_list, module_name=capfirst(force_text(opts.verbose_name_plural)), object=obj, opts=opts, preserved_filters=self.get_preserved_filters(request))
context.update(extra_context or {})
return TemplateResponse(request, self.object_history_template or ['admin/%s/%s/object_history.html' % (app_label, opts.model_name), 'admin/%s/object_history.html' % app_label, 'admin/object_history.html'], context) |
def trimmed(self, n_peaks):
"""
:param n_peaks: number of peaks to keep
:returns: an isotope pattern with removed low-intensity peaks
:rtype: CentroidedSpectrum
"""
result = self.copy()
result.trim(n_peaks)
return result | def function[trimmed, parameter[self, n_peaks]]:
constant[
:param n_peaks: number of peaks to keep
:returns: an isotope pattern with removed low-intensity peaks
:rtype: CentroidedSpectrum
]
variable[result] assign[=] call[name[self].copy, parameter[]]
call[name[result].trim, parameter[name[n_peaks]]]
return[name[result]] | keyword[def] identifier[trimmed] ( identifier[self] , identifier[n_peaks] ):
literal[string]
identifier[result] = identifier[self] . identifier[copy] ()
identifier[result] . identifier[trim] ( identifier[n_peaks] )
keyword[return] identifier[result] | def trimmed(self, n_peaks):
"""
:param n_peaks: number of peaks to keep
:returns: an isotope pattern with removed low-intensity peaks
:rtype: CentroidedSpectrum
"""
result = self.copy()
result.trim(n_peaks)
return result |
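A minimal stand-in showing the copy-then-mutate pattern trimmed() wraps; the real copy() and trim() live on the spectrum class, which is not shown in this row:

class Spectrum:
    def __init__(self, intensities):
        self.intensities = intensities
    def copy(self):
        return Spectrum(list(self.intensities))
    def trim(self, n_peaks):
        # keep the n_peaks most intense peaks, in place
        self.intensities = sorted(self.intensities, reverse=True)[:n_peaks]
    def trimmed(self, n_peaks):
        result = self.copy()
        result.trim(n_peaks)
        return result

s = Spectrum([0.1, 0.9, 0.5])
assert s.trimmed(2).intensities == [0.9, 0.5]
assert s.intensities == [0.1, 0.9, 0.5]  # the original is left untouched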
def make_h5_file(filename, out_dir='./', new_filename=None, max_load=None):
''' Converts file to HDF5 (.h5) format. Default saves output in current dir.
'''
    fil_file = Waterfall(filename, max_load=max_load)
    if not new_filename:
        new_filename = out_dir + filename.replace('.fil', '.h5').split('/')[-1]
    if '.h5' not in new_filename:
        new_filename = new_filename + '.h5'
fil_file.write_to_hdf5(new_filename) | def function[make_h5_file, parameter[filename, out_dir, new_filename, max_load]]:
constant[ Converts file to HDF5 (.h5) format. Default saves output in current dir.
]
variable[fil_file] assign[=] call[name[Waterfall], parameter[name[filename]]]
if <ast.UnaryOp object at 0x7da18f09e3b0> begin[:]
variable[new_filename] assign[=] binary_operation[name[out_dir] + call[call[call[name[filename].replace, parameter[constant[.fil], constant[.h5]]].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da18f09ee60>]]
if compare[constant[.h5] <ast.NotIn object at 0x7da2590d7190> name[new_filename]] begin[:]
variable[new_filename] assign[=] binary_operation[name[new_filename] + constant[.h5]]
call[name[fil_file].write_to_hdf5, parameter[name[new_filename]]] | keyword[def] identifier[make_h5_file] ( identifier[filename] , identifier[out_dir] = literal[string] , identifier[new_filename] = keyword[None] , identifier[max_load] = keyword[None] ):
literal[string]
identifier[fil_file] = identifier[Waterfall] ( identifier[filename] , identifier[max_load] = identifier[max_load] )
keyword[if] keyword[not] identifier[new_filename] :
identifier[new_filename] = identifier[out_dir] + identifier[filename] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )[- literal[int] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[new_filename] :
identifier[new_filename] = identifier[new_filename] + literal[string]
identifier[fil_file] . identifier[write_to_hdf5] ( identifier[new_filename] ) | def make_h5_file(filename, out_dir='./', new_filename=None, max_load=None):
""" Converts file to HDF5 (.h5) format. Default saves output in current dir.
"""
fil_file = Waterfall(filename, max_load=max_load)
if not new_filename:
new_filename = out_dir + filename.replace('.fil', '.h5').split('/')[-1] # depends on [control=['if'], data=[]]
if '.h5' not in new_filename:
new_filename = new_filename + '.h5' # depends on [control=['if'], data=['new_filename']]
fil_file.write_to_hdf5(new_filename) |
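Illustrative usage; the Waterfall import matches blimpy's API, but the input file name is a placeholder:

# writes ./voyager.h5 in the current directory
make_h5_file('voyager.fil', out_dir='./', max_load=1.0)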
def getArc(prom, sig, mc, pos, zerolat):
""" Returns the arc of direction between a promissor
and a significator. Arguments are also the MC, the
geoposition and zerolat to assume zero ecliptical
latitudes.
ZeroLat true => inZodiaco, false => inMundo
"""
pRA, pDecl = prom.eqCoords(zerolat)
sRa, sDecl = sig.eqCoords(zerolat)
mcRa, mcDecl = mc.eqCoords()
return arc(pRA, pDecl, sRa, sDecl, mcRa, pos.lat) | def function[getArc, parameter[prom, sig, mc, pos, zerolat]]:
constant[ Returns the arc of direction between a promissor
and a significator. Arguments are also the MC, the
geoposition and zerolat to assume zero ecliptical
latitudes.
ZeroLat true => inZodiaco, false => inMundo
]
<ast.Tuple object at 0x7da1b11dc100> assign[=] call[name[prom].eqCoords, parameter[name[zerolat]]]
<ast.Tuple object at 0x7da1b11deb90> assign[=] call[name[sig].eqCoords, parameter[name[zerolat]]]
<ast.Tuple object at 0x7da1b11df760> assign[=] call[name[mc].eqCoords, parameter[]]
return[call[name[arc], parameter[name[pRA], name[pDecl], name[sRa], name[sDecl], name[mcRa], name[pos].lat]]] | keyword[def] identifier[getArc] ( identifier[prom] , identifier[sig] , identifier[mc] , identifier[pos] , identifier[zerolat] ):
literal[string]
identifier[pRA] , identifier[pDecl] = identifier[prom] . identifier[eqCoords] ( identifier[zerolat] )
identifier[sRa] , identifier[sDecl] = identifier[sig] . identifier[eqCoords] ( identifier[zerolat] )
identifier[mcRa] , identifier[mcDecl] = identifier[mc] . identifier[eqCoords] ()
keyword[return] identifier[arc] ( identifier[pRA] , identifier[pDecl] , identifier[sRa] , identifier[sDecl] , identifier[mcRa] , identifier[pos] . identifier[lat] ) | def getArc(prom, sig, mc, pos, zerolat):
""" Returns the arc of direction between a promissor
and a significator. Arguments are also the MC, the
geoposition and zerolat to assume zero ecliptical
latitudes.
ZeroLat true => inZodiaco, false => inMundo
"""
(pRA, pDecl) = prom.eqCoords(zerolat)
(sRa, sDecl) = sig.eqCoords(zerolat)
(mcRa, mcDecl) = mc.eqCoords()
return arc(pRA, pDecl, sRa, sDecl, mcRa, pos.lat) |
def write_to_file(self, file_path=''):
"""
Writes the user emails to file.
"""
with open(file_path, 'w+') as out:
out.write('user, email\n')
            sorted_names = sorted(self.logins_lower)  # sort based on lowercase
for login in sorted_names:
out.write(self.logins_lower[login] + ','
+ self.emails[self.logins_lower[login]] + '\n')
out.close() | def function[write_to_file, parameter[self, file_path]]:
constant[
Writes the user emails to file.
]
with call[name[open], parameter[name[file_path], constant[w+]]] begin[:]
call[name[out].write, parameter[constant[user, email
]]]
variable[sorted_names] assign[=] call[name[sorted], parameter[name[self].logins_lower]]
for taget[name[login]] in starred[name[sorted_names]] begin[:]
call[name[out].write, parameter[binary_operation[binary_operation[binary_operation[call[name[self].logins_lower][name[login]] + constant[,]] + call[name[self].emails][call[name[self].logins_lower][name[login]]]] + constant[
]]]]
call[name[out].close, parameter[]] | keyword[def] identifier[write_to_file] ( identifier[self] , identifier[file_path] = literal[string] ):
literal[string]
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[out] :
identifier[out] . identifier[write] ( literal[string] )
identifier[sorted_names] = identifier[sorted] ( identifier[self] . identifier[logins_lower] )
keyword[for] identifier[login] keyword[in] identifier[sorted_names] :
identifier[out] . identifier[write] ( identifier[self] . identifier[logins_lower] [ identifier[login] ]+ literal[string]
+ identifier[self] . identifier[emails] [ identifier[self] . identifier[logins_lower] [ identifier[login] ]]+ literal[string] )
identifier[out] . identifier[close] () | def write_to_file(self, file_path=''):
"""
Writes the user emails to file.
"""
with open(file_path, 'w+') as out:
out.write('user, email\n')
sorted_names = sorted(self.logins_lower) #sort based on lowercase
for login in sorted_names:
out.write(self.logins_lower[login] + ',' + self.emails[self.logins_lower[login]] + '\n') # depends on [control=['for'], data=['login']] # depends on [control=['with'], data=['out']]
out.close() |
def runSearchFeatures(self, request):
"""
Returns a SearchFeaturesResponse for the specified
SearchFeaturesRequest object.
:param request: JSON string representing searchFeaturesRequest
:return: JSON string representing searchFeatureResponse
"""
return self.runSearchRequest(
request, protocol.SearchFeaturesRequest,
protocol.SearchFeaturesResponse,
self.featuresGenerator) | def function[runSearchFeatures, parameter[self, request]]:
constant[
Returns a SearchFeaturesResponse for the specified
SearchFeaturesRequest object.
:param request: JSON string representing searchFeaturesRequest
:return: JSON string representing searchFeatureResponse
]
return[call[name[self].runSearchRequest, parameter[name[request], name[protocol].SearchFeaturesRequest, name[protocol].SearchFeaturesResponse, name[self].featuresGenerator]]] | keyword[def] identifier[runSearchFeatures] ( identifier[self] , identifier[request] ):
literal[string]
keyword[return] identifier[self] . identifier[runSearchRequest] (
identifier[request] , identifier[protocol] . identifier[SearchFeaturesRequest] ,
identifier[protocol] . identifier[SearchFeaturesResponse] ,
identifier[self] . identifier[featuresGenerator] ) | def runSearchFeatures(self, request):
"""
Returns a SearchFeaturesResponse for the specified
SearchFeaturesRequest object.
:param request: JSON string representing searchFeaturesRequest
:return: JSON string representing searchFeatureResponse
"""
return self.runSearchRequest(request, protocol.SearchFeaturesRequest, protocol.SearchFeaturesResponse, self.featuresGenerator) |
def Uninstall(self, package_name, keep_data=False, timeout_ms=None):
"""Removes a package from the device.
Args:
package_name: Package name of target package.
keep_data: whether to keep the data and cache directories
timeout_ms: Expected timeout for pushing and installing.
Returns:
The pm uninstall output.
"""
cmd = ['pm uninstall']
if keep_data:
cmd.append('-k')
cmd.append('"%s"' % package_name)
return self.Shell(' '.join(cmd), timeout_ms=timeout_ms) | def function[Uninstall, parameter[self, package_name, keep_data, timeout_ms]]:
constant[Removes a package from the device.
Args:
package_name: Package name of target package.
keep_data: whether to keep the data and cache directories
timeout_ms: Expected timeout for pushing and installing.
Returns:
The pm uninstall output.
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b170f3a0>]]
if name[keep_data] begin[:]
call[name[cmd].append, parameter[constant[-k]]]
call[name[cmd].append, parameter[binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[package_name]]]]
return[call[name[self].Shell, parameter[call[constant[ ].join, parameter[name[cmd]]]]]] | keyword[def] identifier[Uninstall] ( identifier[self] , identifier[package_name] , identifier[keep_data] = keyword[False] , identifier[timeout_ms] = keyword[None] ):
literal[string]
identifier[cmd] =[ literal[string] ]
keyword[if] identifier[keep_data] :
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[append] ( literal[string] % identifier[package_name] )
keyword[return] identifier[self] . identifier[Shell] ( literal[string] . identifier[join] ( identifier[cmd] ), identifier[timeout_ms] = identifier[timeout_ms] ) | def Uninstall(self, package_name, keep_data=False, timeout_ms=None):
"""Removes a package from the device.
Args:
package_name: Package name of target package.
keep_data: whether to keep the data and cache directories
timeout_ms: Expected timeout for pushing and installing.
Returns:
The pm uninstall output.
"""
cmd = ['pm uninstall']
if keep_data:
cmd.append('-k') # depends on [control=['if'], data=[]]
cmd.append('"%s"' % package_name)
return self.Shell(' '.join(cmd), timeout_ms=timeout_ms) |
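A hypothetical call site; device stands for whatever adb wrapper object carries this method and its Shell() implementation:

output = device.Uninstall('com.example.app', keep_data=True)
# pm prints 'Success' on a successful uninstall; -k keeps app data and cache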
def qos_map_cos_traffic_class_cos7(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
map = ET.SubElement(qos, "map")
cos_traffic_class = ET.SubElement(map, "cos-traffic-class")
name_key = ET.SubElement(cos_traffic_class, "name")
name_key.text = kwargs.pop('name')
cos7 = ET.SubElement(cos_traffic_class, "cos7")
cos7.text = kwargs.pop('cos7')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[qos_map_cos_traffic_class_cos7, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[qos] assign[=] call[name[ET].SubElement, parameter[name[config], constant[qos]]]
variable[map] assign[=] call[name[ET].SubElement, parameter[name[qos], constant[map]]]
variable[cos_traffic_class] assign[=] call[name[ET].SubElement, parameter[name[map], constant[cos-traffic-class]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[cos_traffic_class], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[cos7] assign[=] call[name[ET].SubElement, parameter[name[cos_traffic_class], constant[cos7]]]
name[cos7].text assign[=] call[name[kwargs].pop, parameter[constant[cos7]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[qos_map_cos_traffic_class_cos7] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[qos] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[map] = identifier[ET] . identifier[SubElement] ( identifier[qos] , literal[string] )
identifier[cos_traffic_class] = identifier[ET] . identifier[SubElement] ( identifier[map] , literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[cos_traffic_class] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[cos7] = identifier[ET] . identifier[SubElement] ( identifier[cos_traffic_class] , literal[string] )
identifier[cos7] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def qos_map_cos_traffic_class_cos7(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
qos = ET.SubElement(config, 'qos', xmlns='urn:brocade.com:mgmt:brocade-qos')
map = ET.SubElement(qos, 'map')
cos_traffic_class = ET.SubElement(map, 'cos-traffic-class')
name_key = ET.SubElement(cos_traffic_class, 'name')
name_key.text = kwargs.pop('name')
cos7 = ET.SubElement(cos_traffic_class, 'cos7')
cos7.text = kwargs.pop('cos7')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def create_server(self, datacenter_id, server):
"""
Creates a server within the data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server: A dict of the server to be created.
:type server: ``dict``
"""
data = json.dumps(self._create_server_dict(server))
response = self._perform_request(
            url='/datacenters/%s/servers' % datacenter_id,
method='POST',
data=data)
return response | def function[create_server, parameter[self, datacenter_id, server]]:
constant[
Creates a server within the data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server: A dict of the server to be created.
:type server: ``dict``
]
variable[data] assign[=] call[name[json].dumps, parameter[call[name[self]._create_server_dict, parameter[name[server]]]]]
variable[response] assign[=] call[name[self]._perform_request, parameter[]]
return[name[response]] | keyword[def] identifier[create_server] ( identifier[self] , identifier[datacenter_id] , identifier[server] ):
literal[string]
identifier[data] = identifier[json] . identifier[dumps] ( identifier[self] . identifier[_create_server_dict] ( identifier[server] ))
identifier[response] = identifier[self] . identifier[_perform_request] (
identifier[url] = literal[string] %( identifier[datacenter_id] ),
identifier[method] = literal[string] ,
identifier[data] = identifier[data] )
keyword[return] identifier[response] | def create_server(self, datacenter_id, server):
"""
Creates a server within the data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server: A dict of the server to be created.
:type server: ``dict``
"""
data = json.dumps(self._create_server_dict(server))
response = self._perform_request(url='/datacenters/%s/servers' % datacenter_id, method='POST', data=data)
return response |
def upload_jterator_project(self, pipeline, handles):
'''Uploads a *jterator* project.
Parameters
----------
pipeline: dict
description of the jterator pipeline
handles: dict, optional
description of each module in the jterator pipeline
See also
--------
:func:`tmserver.api.workflow.update_jterator_project`
:class:`tmlib.workflow.jterator.description.PipelineDescription`
:class:`tmlib.workflow.jterator.description.HandleDescriptions`
'''
logger.info(
'upload jterator project for experiment "%s"', self.experiment_name
)
content = {
'pipeline': pipeline,
'handles': handles
}
url = self._build_api_url(
'/experiments/{experiment_id}/workflow/jtproject'.format(
experiment_id=self._experiment_id
)
)
res = self._session.put(url, json=content)
res.raise_for_status() | def function[upload_jterator_project, parameter[self, pipeline, handles]]:
constant[Uploads a *jterator* project.
Parameters
----------
pipeline: dict
description of the jterator pipeline
handles: dict, optional
description of each module in the jterator pipeline
See also
--------
:func:`tmserver.api.workflow.update_jterator_project`
:class:`tmlib.workflow.jterator.description.PipelineDescription`
:class:`tmlib.workflow.jterator.description.HandleDescriptions`
]
call[name[logger].info, parameter[constant[upload jterator project for experiment "%s"], name[self].experiment_name]]
variable[content] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e6440>, <ast.Constant object at 0x7da20c6e5d50>], [<ast.Name object at 0x7da20c6e7670>, <ast.Name object at 0x7da20c6e7f10>]]
variable[url] assign[=] call[name[self]._build_api_url, parameter[call[constant[/experiments/{experiment_id}/workflow/jtproject].format, parameter[]]]]
variable[res] assign[=] call[name[self]._session.put, parameter[name[url]]]
call[name[res].raise_for_status, parameter[]] | keyword[def] identifier[upload_jterator_project] ( identifier[self] , identifier[pipeline] , identifier[handles] ):
literal[string]
identifier[logger] . identifier[info] (
literal[string] , identifier[self] . identifier[experiment_name]
)
identifier[content] ={
literal[string] : identifier[pipeline] ,
literal[string] : identifier[handles]
}
identifier[url] = identifier[self] . identifier[_build_api_url] (
literal[string] . identifier[format] (
identifier[experiment_id] = identifier[self] . identifier[_experiment_id]
)
)
identifier[res] = identifier[self] . identifier[_session] . identifier[put] ( identifier[url] , identifier[json] = identifier[content] )
identifier[res] . identifier[raise_for_status] () | def upload_jterator_project(self, pipeline, handles):
"""Uploads a *jterator* project.
Parameters
----------
pipeline: dict
description of the jterator pipeline
handles: dict, optional
description of each module in the jterator pipeline
See also
--------
:func:`tmserver.api.workflow.update_jterator_project`
:class:`tmlib.workflow.jterator.description.PipelineDescription`
:class:`tmlib.workflow.jterator.description.HandleDescriptions`
"""
logger.info('upload jterator project for experiment "%s"', self.experiment_name)
content = {'pipeline': pipeline, 'handles': handles}
url = self._build_api_url('/experiments/{experiment_id}/workflow/jtproject'.format(experiment_id=self._experiment_id))
res = self._session.put(url, json=content)
res.raise_for_status() |
def create_absolute_values_structure(layer, fields):
"""Helper function to create the structure for absolute values.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields: List of name field on which we want to aggregate.
:type fields: list
:return: The data structure.
:rtype: dict
"""
# Let's create a structure like :
# key is the index of the field : (flat table, definition name)
source_fields = layer.keywords['inasafe_fields']
absolute_fields = [field['key'] for field in count_fields]
summaries = {}
for field in source_fields:
if field in absolute_fields:
field_name = source_fields[field]
index = layer.fields().lookupField(field_name)
flat_table = FlatTable(*fields)
summaries[index] = (flat_table, field)
return summaries | def function[create_absolute_values_structure, parameter[layer, fields]]:
constant[Helper function to create the structure for absolute values.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields: List of name field on which we want to aggregate.
:type fields: list
:return: The data structure.
:rtype: dict
]
variable[source_fields] assign[=] call[name[layer].keywords][constant[inasafe_fields]]
variable[absolute_fields] assign[=] <ast.ListComp object at 0x7da20c990760>
variable[summaries] assign[=] dictionary[[], []]
for taget[name[field]] in starred[name[source_fields]] begin[:]
if compare[name[field] in name[absolute_fields]] begin[:]
variable[field_name] assign[=] call[name[source_fields]][name[field]]
variable[index] assign[=] call[call[name[layer].fields, parameter[]].lookupField, parameter[name[field_name]]]
variable[flat_table] assign[=] call[name[FlatTable], parameter[<ast.Starred object at 0x7da20c993490>]]
call[name[summaries]][name[index]] assign[=] tuple[[<ast.Name object at 0x7da20c9910f0>, <ast.Name object at 0x7da20c993220>]]
return[name[summaries]] | keyword[def] identifier[create_absolute_values_structure] ( identifier[layer] , identifier[fields] ):
literal[string]
identifier[source_fields] = identifier[layer] . identifier[keywords] [ literal[string] ]
identifier[absolute_fields] =[ identifier[field] [ literal[string] ] keyword[for] identifier[field] keyword[in] identifier[count_fields] ]
identifier[summaries] ={}
keyword[for] identifier[field] keyword[in] identifier[source_fields] :
keyword[if] identifier[field] keyword[in] identifier[absolute_fields] :
identifier[field_name] = identifier[source_fields] [ identifier[field] ]
identifier[index] = identifier[layer] . identifier[fields] (). identifier[lookupField] ( identifier[field_name] )
identifier[flat_table] = identifier[FlatTable] (* identifier[fields] )
identifier[summaries] [ identifier[index] ]=( identifier[flat_table] , identifier[field] )
keyword[return] identifier[summaries] | def create_absolute_values_structure(layer, fields):
"""Helper function to create the structure for absolute values.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param fields: List of name field on which we want to aggregate.
:type fields: list
:return: The data structure.
:rtype: dict
"""
# Let's create a structure like :
# key is the index of the field : (flat table, definition name)
source_fields = layer.keywords['inasafe_fields']
absolute_fields = [field['key'] for field in count_fields]
summaries = {}
for field in source_fields:
if field in absolute_fields:
field_name = source_fields[field]
index = layer.fields().lookupField(field_name)
flat_table = FlatTable(*fields)
summaries[index] = (flat_table, field) # depends on [control=['if'], data=['field']] # depends on [control=['for'], data=['field']]
return summaries |
def subscribe_list(self, list_id):
"""
Subscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.subscribe_list(list_id=list_id))) | def function[subscribe_list, parameter[self, list_id]]:
constant[
Subscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
]
return[call[name[List], parameter[call[name[tweepy_list_to_json], parameter[call[name[self]._client.subscribe_list, parameter[]]]]]]] | keyword[def] identifier[subscribe_list] ( identifier[self] , identifier[list_id] ):
literal[string]
keyword[return] identifier[List] ( identifier[tweepy_list_to_json] ( identifier[self] . identifier[_client] . identifier[subscribe_list] ( identifier[list_id] = identifier[list_id] ))) | def subscribe_list(self, list_id):
"""
Subscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.subscribe_list(list_id=list_id))) |
def isEquilateral(self):
'''
True if all sides of the triangle are the same length.
All equilateral triangles are also isosceles.
All equilateral triangles are also acute.
'''
if not nearly_eq(self.a, self.b):
return False
if not nearly_eq(self.b, self.c):
return False
return nearly_eq(self.a, self.c) | def function[isEquilateral, parameter[self]]:
constant[
True if all sides of the triangle are the same length.
All equilateral triangles are also isosceles.
All equilateral triangles are also acute.
]
if <ast.UnaryOp object at 0x7da1b10d69b0> begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b10d5690> begin[:]
return[constant[False]]
return[call[name[nearly_eq], parameter[name[self].a, name[self].c]]] | keyword[def] identifier[isEquilateral] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[nearly_eq] ( identifier[self] . identifier[a] , identifier[self] . identifier[b] ):
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[nearly_eq] ( identifier[self] . identifier[b] , identifier[self] . identifier[c] ):
keyword[return] keyword[False]
keyword[return] identifier[nearly_eq] ( identifier[self] . identifier[a] , identifier[self] . identifier[c] ) | def isEquilateral(self):
"""
True if all sides of the triangle are the same length.
All equilateral triangles are also isosceles.
All equilateral triangles are also acute.
"""
if not nearly_eq(self.a, self.b):
return False # depends on [control=['if'], data=[]]
if not nearly_eq(self.b, self.c):
return False # depends on [control=['if'], data=[]]
return nearly_eq(self.a, self.c) |
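A minimal sketch of the tolerance test the method leans on; nearly_eq is assumed to be a float-tolerant equality helper, approximated here with math.isclose.

import math

def nearly_eq(a, b, rel_tol=1e-9):
    # assumed behaviour of the nearly_eq helper used above
    return math.isclose(a, b, rel_tol=rel_tol)

a, b, c = 1.0, 1.0, 1.0 + 1e-12  # sides of an (almost exactly) equilateral triangle
print(nearly_eq(a, b) and nearly_eq(b, c) and nearly_eq(a, c))  # True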
def validate(df, criteria={}, exclude_on_fail=False, **kwargs):
"""Validate scenarios using criteria on timeseries values
Parameters
----------
df: IamDataFrame instance
args: see `IamDataFrame.validate()` for details
kwargs: passed to `df.filter()`
"""
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.validate(criteria=criteria, exclude_on_fail=exclude_on_fail)
df.meta['exclude'] |= fdf.meta['exclude'] # update if any excluded
return vdf | def function[validate, parameter[df, criteria, exclude_on_fail]]:
constant[Validate scenarios using criteria on timeseries values
Parameters
----------
df: IamDataFrame instance
args: see `IamDataFrame.validate()` for details
kwargs: passed to `df.filter()`
]
variable[fdf] assign[=] call[name[df].filter, parameter[]]
if compare[call[name[len], parameter[name[fdf].data]] greater[>] constant[0]] begin[:]
variable[vdf] assign[=] call[name[fdf].validate, parameter[]]
<ast.AugAssign object at 0x7da1b0d1b9d0>
return[name[vdf]] | keyword[def] identifier[validate] ( identifier[df] , identifier[criteria] ={}, identifier[exclude_on_fail] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[fdf] = identifier[df] . identifier[filter] (** identifier[kwargs] )
keyword[if] identifier[len] ( identifier[fdf] . identifier[data] )> literal[int] :
identifier[vdf] = identifier[fdf] . identifier[validate] ( identifier[criteria] = identifier[criteria] , identifier[exclude_on_fail] = identifier[exclude_on_fail] )
identifier[df] . identifier[meta] [ literal[string] ]|= identifier[fdf] . identifier[meta] [ literal[string] ]
keyword[return] identifier[vdf] | def validate(df, criteria={}, exclude_on_fail=False, **kwargs):
"""Validate scenarios using criteria on timeseries values
Parameters
----------
df: IamDataFrame instance
args: see `IamDataFrame.validate()` for details
kwargs: passed to `df.filter()`
"""
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.validate(criteria=criteria, exclude_on_fail=exclude_on_fail)
df.meta['exclude'] |= fdf.meta['exclude'] # update if any excluded
return vdf # depends on [control=['if'], data=[]] |
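A hedged usage sketch, assuming this wrapper targets pyam's IamDataFrame; the scenario data and the criteria bounds are invented for illustration.

import pandas as pd
import pyam  # assumed dependency providing IamDataFrame

data = pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/yr', 2030, 7.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 'year', 'value'])
df = pyam.IamDataFrame(data)
# flag scenarios where Primary Energy exceeds 5 EJ/yr in 2030
failed = validate(df, criteria={'Primary Energy': {'up': 5.0, 'year': 2030}})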
def drop_matching_records(self, check):
"""Remove a record from the DB."""
matches = self._match(check)
for m in matches:
del self._records[m['msg_id']] | def function[drop_matching_records, parameter[self, check]]:
    constant[Remove all records matching the given check from the DB.]
variable[matches] assign[=] call[name[self]._match, parameter[name[check]]]
for taget[name[m]] in starred[name[matches]] begin[:]
<ast.Delete object at 0x7da18ede79d0> | keyword[def] identifier[drop_matching_records] ( identifier[self] , identifier[check] ):
literal[string]
identifier[matches] = identifier[self] . identifier[_match] ( identifier[check] )
keyword[for] identifier[m] keyword[in] identifier[matches] :
keyword[del] identifier[self] . identifier[_records] [ identifier[m] [ literal[string] ]] | def drop_matching_records(self, check):
"""Remove a record from the DB."""
matches = self._match(check)
for m in matches:
del self._records[m['msg_id']] # depends on [control=['for'], data=['m']] |
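A self-contained sketch of the in-memory pattern above; the _match implementation and the record layout are assumptions inferred from the msg_id lookup.

class MemoryDB:
    def __init__(self):
        self._records = {}  # msg_id -> record dict

    def _match(self, check):
        # assumed behaviour: return records whose fields all equal `check`
        return [r for r in self._records.values()
                if all(r.get(k) == v for k, v in check.items())]

    def drop_matching_records(self, check):
        for m in self._match(check):
            del self._records[m['msg_id']]

db = MemoryDB()
db._records = {'1': {'msg_id': '1', 'status': 'done'},
               '2': {'msg_id': '2', 'status': 'pending'}}
db.drop_matching_records({'status': 'done'})
print(sorted(db._records))  # ['2']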
def main():
"""
Commandline interface to extract parameters.
"""
setup_main_logger(console=True, file_logging=False)
params = argparse.ArgumentParser(description="Extract specific parameters.")
arguments.add_extract_args(params)
args = params.parse_args()
extract_parameters(args) | def function[main, parameter[]]:
constant[
Commandline interface to extract parameters.
]
call[name[setup_main_logger], parameter[]]
variable[params] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[arguments].add_extract_args, parameter[name[params]]]
variable[args] assign[=] call[name[params].parse_args, parameter[]]
call[name[extract_parameters], parameter[name[args]]] | keyword[def] identifier[main] ():
literal[string]
identifier[setup_main_logger] ( identifier[console] = keyword[True] , identifier[file_logging] = keyword[False] )
identifier[params] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] )
identifier[arguments] . identifier[add_extract_args] ( identifier[params] )
identifier[args] = identifier[params] . identifier[parse_args] ()
identifier[extract_parameters] ( identifier[args] ) | def main():
"""
Commandline interface to extract parameters.
"""
setup_main_logger(console=True, file_logging=False)
params = argparse.ArgumentParser(description='Extract specific parameters.')
arguments.add_extract_args(params)
args = params.parse_args()
extract_parameters(args) |
def split_values(ustring, sep=u','):
"""
Splits unicode string with separator C{sep},
but skips escaped separator.
@param ustring: string to split
@type ustring: C{unicode}
@param sep: separator (default to u',')
@type sep: C{unicode}
    @return: tuple of split elements
"""
    assert isinstance(ustring, six.text_type), "ustring must be unicode, not %s" % type(ustring)
    # unicode has a special mark symbol 0xffff which cannot appear in regular text,
    # so we use it to mark the places where an escaped separator was
    ustring_marked = ustring.replace(u'\\,', u'\uffff')
items = tuple([i.strip().replace(u'\uffff', u',') for i in ustring_marked.split(sep)])
return items | def function[split_values, parameter[ustring, sep]]:
constant[
Splits unicode string with separator C{sep},
but skips escaped separator.
@param ustring: string to split
@type ustring: C{unicode}
@param sep: separator (default to u',')
@type sep: C{unicode}
    @return: tuple of split elements
]
assert[call[name[isinstance], parameter[name[ustring], name[six].text_type]]]
variable[ustring_marked] assign[=] call[name[ustring].replace, parameter[constant[\,], constant[]]]
variable[items] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da1b0dbd990>]]
return[name[items]] | keyword[def] identifier[split_values] ( identifier[ustring] , identifier[sep] = literal[string] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[ustring] , identifier[six] . identifier[text_type] ), literal[string] % identifier[type] ( identifier[ustring] )
identifier[ustring_marked] = identifier[ustring] . identifier[replace] ( literal[string] , literal[string] )
identifier[items] = identifier[tuple] ([ identifier[i] . identifier[strip] (). identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[i] keyword[in] identifier[ustring_marked] . identifier[split] ( identifier[sep] )])
keyword[return] identifier[items] | def split_values(ustring, sep=u','):
"""
Splits unicode string with separator C{sep},
but skips escaped separator.
@param ustring: string to split
@type ustring: C{unicode}
@param sep: separator (default to u',')
@type sep: C{unicode}
    @return: tuple of split elements
    """
    assert isinstance(ustring, six.text_type), 'ustring must be unicode, not %s' % type(ustring)
    # unicode has a special mark symbol 0xffff which cannot appear in regular text,
    # so we use it to mark the places where an escaped separator was
ustring_marked = ustring.replace(u'\\,', u'\uffff')
items = tuple([i.strip().replace(u'\uffff', u',') for i in ustring_marked.split(sep)])
return items |
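Assuming the behaviour above, a quick check shows escaped separators surviving the split:

print(split_values(u'foo, bar\\, baz'))    # (u'foo', u'bar, baz')
print(split_values(u'x; y; z', sep=u';'))  # (u'x', u'y', u'z')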
def get_start_time(self):
"""
Return the start time of the entry as a :class:`datetime.time` object.
If the start time is `None`, the end time of the previous entry will be
returned instead. If the current entry doesn't have a duration in the
form of a tuple, if there's no previous entry or if the previous entry
has no end time, the value `None` will be returned.
"""
if not isinstance(self.duration, tuple):
return None
if self.duration[0] is not None:
return self.duration[0]
else:
if (self.previous_entry and
isinstance(self.previous_entry.duration, tuple) and
self.previous_entry.duration[1] is not None):
return self.previous_entry.duration[1]
return None | def function[get_start_time, parameter[self]]:
constant[
Return the start time of the entry as a :class:`datetime.time` object.
If the start time is `None`, the end time of the previous entry will be
returned instead. If the current entry doesn't have a duration in the
form of a tuple, if there's no previous entry or if the previous entry
has no end time, the value `None` will be returned.
]
if <ast.UnaryOp object at 0x7da1b1912c80> begin[:]
return[constant[None]]
if compare[call[name[self].duration][constant[0]] is_not constant[None]] begin[:]
return[call[name[self].duration][constant[0]]]
return[constant[None]] | keyword[def] identifier[get_start_time] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[duration] , identifier[tuple] ):
keyword[return] keyword[None]
keyword[if] identifier[self] . identifier[duration] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[duration] [ literal[int] ]
keyword[else] :
keyword[if] ( identifier[self] . identifier[previous_entry] keyword[and]
identifier[isinstance] ( identifier[self] . identifier[previous_entry] . identifier[duration] , identifier[tuple] ) keyword[and]
identifier[self] . identifier[previous_entry] . identifier[duration] [ literal[int] ] keyword[is] keyword[not] keyword[None] ):
keyword[return] identifier[self] . identifier[previous_entry] . identifier[duration] [ literal[int] ]
keyword[return] keyword[None] | def get_start_time(self):
"""
Return the start time of the entry as a :class:`datetime.time` object.
If the start time is `None`, the end time of the previous entry will be
returned instead. If the current entry doesn't have a duration in the
form of a tuple, if there's no previous entry or if the previous entry
has no end time, the value `None` will be returned.
"""
if not isinstance(self.duration, tuple):
return None # depends on [control=['if'], data=[]]
if self.duration[0] is not None:
return self.duration[0] # depends on [control=['if'], data=[]]
elif self.previous_entry and isinstance(self.previous_entry.duration, tuple) and (self.previous_entry.duration[1] is not None):
return self.previous_entry.duration[1] # depends on [control=['if'], data=[]]
return None |
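A compact sketch of the fallback; the Entry class below is a hypothetical stand-in carrying only the two attributes the function reads.

import datetime

class Entry:  # hypothetical stand-in
    def __init__(self, duration, previous_entry=None):
        self.duration = duration
        self.previous_entry = previous_entry

first = Entry((datetime.time(9, 0), datetime.time(10, 30)))
second = Entry((None, datetime.time(12, 0)), previous_entry=first)
# start of `second` falls back to the end time of `first`
print(get_start_time(second))  # 10:30:00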
def update(self, other=(), **kwargs):
'''Just like `dict.update`'''
_kwargs = dict(kwargs)
_kwargs.update(other)
for key, value in _kwargs.items():
self[key] = value | def function[update, parameter[self, other]]:
constant[Just like `dict.update`]
variable[_kwargs] assign[=] call[name[dict], parameter[name[kwargs]]]
call[name[_kwargs].update, parameter[name[other]]]
for taget[tuple[[<ast.Name object at 0x7da1b1b6b520>, <ast.Name object at 0x7da1b1b6af50>]]] in starred[call[name[_kwargs].items, parameter[]]] begin[:]
call[name[self]][name[key]] assign[=] name[value] | keyword[def] identifier[update] ( identifier[self] , identifier[other] =(),** identifier[kwargs] ):
literal[string]
identifier[_kwargs] = identifier[dict] ( identifier[kwargs] )
identifier[_kwargs] . identifier[update] ( identifier[other] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[_kwargs] . identifier[items] ():
identifier[self] [ identifier[key] ]= identifier[value] | def update(self, other=(), **kwargs):
"""Just like `dict.update`"""
_kwargs = dict(kwargs)
_kwargs.update(other)
for (key, value) in _kwargs.items():
self[key] = value # depends on [control=['for'], data=[]] |
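One behavioural detail worth noting: because the kwargs are copied first and then overlaid with `other`, entries in `other` win over keyword arguments, which is the reverse of the built-in dict.update. A small sketch on a plain dict subclass:

class RecordingDict(dict):  # hypothetical host class for the update() above
    def update(self, other=(), **kwargs):
        _kwargs = dict(kwargs)
        _kwargs.update(other)
        for key, value in _kwargs.items():
            self[key] = value

d = RecordingDict()
d.update({'a': 1}, a=2)
print(d['a'])  # 1 -- `other` overrides the keyword argument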
def _setweights(self):
"Apply dropout to the raw weights."
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_p, training=self.training) | def function[_setweights, parameter[self]]:
constant[Apply dropout to the raw weights.]
for taget[name[layer]] in starred[name[self].layer_names] begin[:]
variable[raw_w] assign[=] call[name[getattr], parameter[name[self], <ast.JoinedStr object at 0x7da1b1e9a980>]]
call[name[self].module._parameters][name[layer]] assign[=] call[name[F].dropout, parameter[name[raw_w]]] | keyword[def] identifier[_setweights] ( identifier[self] ):
literal[string]
keyword[for] identifier[layer] keyword[in] identifier[self] . identifier[layer_names] :
identifier[raw_w] = identifier[getattr] ( identifier[self] , literal[string] )
identifier[self] . identifier[module] . identifier[_parameters] [ identifier[layer] ]= identifier[F] . identifier[dropout] ( identifier[raw_w] , identifier[p] = identifier[self] . identifier[weight_p] , identifier[training] = identifier[self] . identifier[training] ) | def _setweights(self):
"""Apply dropout to the raw weights."""
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_p, training=self.training) # depends on [control=['for'], data=['layer']] |
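The pattern above is weight dropout (DropConnect) in the AWD-LSTM style: a stored raw weight tensor is re-masked on every forward pass. A minimal PyTorch sketch, independent of the wrapper class:

import torch
import torch.nn.functional as F

raw_w = torch.randn(4, 4)                   # the stored `<layer>_raw` tensor
w = F.dropout(raw_w, p=0.5, training=True)  # a fresh mask is sampled per call
print((w == 0).float().mean())              # roughly half the entries are zeroed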
def load_device(self, serial=None):
"""Creates an AndroidDevice for the given serial number.
If no serial is given, it will read from the ANDROID_SERIAL
        environment variable. If the environment variable is not set, then
it will read from 'adb devices' if there is only one.
"""
serials = android_device.list_adb_devices()
if not serials:
raise Error('No adb device found!')
# No serial provided, try to pick up the device automatically.
if not serial:
env_serial = os.environ.get('ANDROID_SERIAL', None)
if env_serial is not None:
serial = env_serial
elif len(serials) == 1:
serial = serials[0]
else:
raise Error(
'Expected one phone, but %d found. Use the -s flag or '
'specify ANDROID_SERIAL.' % len(serials))
if serial not in serials:
raise Error('Device "%s" is not found by adb.' % serial)
ads = android_device.get_instances([serial])
assert len(ads) == 1
self._ad = ads[0] | def function[load_device, parameter[self, serial]]:
constant[Creates an AndroidDevice for the given serial number.
If no serial is given, it will read from the ANDROID_SERIAL
environment variable. If the environment variable is not set, then
it will read from 'adb devices' if there is only one.
]
variable[serials] assign[=] call[name[android_device].list_adb_devices, parameter[]]
if <ast.UnaryOp object at 0x7da1b0857220> begin[:]
<ast.Raise object at 0x7da1b08568f0>
if <ast.UnaryOp object at 0x7da1b0857b50> begin[:]
variable[env_serial] assign[=] call[name[os].environ.get, parameter[constant[ANDROID_SERIAL], constant[None]]]
if compare[name[env_serial] is_not constant[None]] begin[:]
variable[serial] assign[=] name[env_serial]
if compare[name[serial] <ast.NotIn object at 0x7da2590d7190> name[serials]] begin[:]
<ast.Raise object at 0x7da1b0632fe0>
variable[ads] assign[=] call[name[android_device].get_instances, parameter[list[[<ast.Name object at 0x7da1b0633520>]]]]
assert[compare[call[name[len], parameter[name[ads]]] equal[==] constant[1]]]
name[self]._ad assign[=] call[name[ads]][constant[0]] | keyword[def] identifier[load_device] ( identifier[self] , identifier[serial] = keyword[None] ):
literal[string]
identifier[serials] = identifier[android_device] . identifier[list_adb_devices] ()
keyword[if] keyword[not] identifier[serials] :
keyword[raise] identifier[Error] ( literal[string] )
keyword[if] keyword[not] identifier[serial] :
identifier[env_serial] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[env_serial] keyword[is] keyword[not] keyword[None] :
identifier[serial] = identifier[env_serial]
keyword[elif] identifier[len] ( identifier[serials] )== literal[int] :
identifier[serial] = identifier[serials] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[Error] (
literal[string]
literal[string] % identifier[len] ( identifier[serials] ))
keyword[if] identifier[serial] keyword[not] keyword[in] identifier[serials] :
keyword[raise] identifier[Error] ( literal[string] % identifier[serial] )
identifier[ads] = identifier[android_device] . identifier[get_instances] ([ identifier[serial] ])
keyword[assert] identifier[len] ( identifier[ads] )== literal[int]
identifier[self] . identifier[_ad] = identifier[ads] [ literal[int] ] | def load_device(self, serial=None):
"""Creates an AndroidDevice for the given serial number.
If no serial is given, it will read from the ANDROID_SERIAL
    environment variable. If the environment variable is not set, then
it will read from 'adb devices' if there is only one.
"""
serials = android_device.list_adb_devices()
if not serials:
raise Error('No adb device found!') # depends on [control=['if'], data=[]]
# No serial provided, try to pick up the device automatically.
if not serial:
env_serial = os.environ.get('ANDROID_SERIAL', None)
if env_serial is not None:
serial = env_serial # depends on [control=['if'], data=['env_serial']]
elif len(serials) == 1:
serial = serials[0] # depends on [control=['if'], data=[]]
else:
raise Error('Expected one phone, but %d found. Use the -s flag or specify ANDROID_SERIAL.' % len(serials)) # depends on [control=['if'], data=[]]
if serial not in serials:
raise Error('Device "%s" is not found by adb.' % serial) # depends on [control=['if'], data=['serial']]
ads = android_device.get_instances([serial])
assert len(ads) == 1
self._ad = ads[0] |
def warp(self, order):
"""对order/market的封装
[description]
Arguments:
order {[type]} -- [description]
Returns:
[type] -- [description]
"""
        # adjust the time according to the execution model
if order.order_model == ORDER_MODEL.MARKET:
if order.frequence is FREQUENCE.DAY:
# exact_time = str(datetime.datetime.strptime(
# str(order.datetime), '%Y-%m-%d %H-%M-%S') + datetime.timedelta(day=1))
order.date = order.datetime[0:10]
order.datetime = '{} 09:30:00'.format(order.date)
elif order.frequence in [FREQUENCE.ONE_MIN,
FREQUENCE.FIVE_MIN,
FREQUENCE.FIFTEEN_MIN,
FREQUENCE.THIRTY_MIN,
FREQUENCE.SIXTY_MIN]:
exact_time = str(
datetime.datetime
.strptime(str(order.datetime),
'%Y-%m-%d %H:%M:%S') +
datetime.timedelta(minutes=1)
)
order.date = exact_time[0:10]
order.datetime = exact_time
self.market_data = self.get_market(order)
if self.market_data is None:
return order
order.price = (
float(self.market_data["high"]) +
float(self.market_data["low"])
) * 0.5
elif order.order_model == ORDER_MODEL.NEXT_OPEN:
try:
exact_time = str(
datetime.datetime
.strptime(str(order.datetime),
                              '%Y-%m-%d %H:%M:%S') + datetime.timedelta(days=1)
)
order.date = exact_time[0:10]
order.datetime = '{} 09:30:00'.format(order.date)
except:
order.datetime = '{} 15:00:00'.format(order.date)
self.market_data = self.get_market(order)
if self.market_data is None:
return order
order.price = float(self.market_data["close"])
elif order.order_model == ORDER_MODEL.CLOSE:
try:
order.datetime = self.market_data.datetime
except:
if len(str(order.datetime)) == 19:
pass
else:
order.datetime = '{} 15:00:00'.format(order.date)
self.market_data = self.get_market(order)
if self.market_data is None:
return order
order.price = float(self.market_data["close"])
elif order.order_model == ORDER_MODEL.STRICT:
            'strict mode'
if order.frequence is FREQUENCE.DAY:
exact_time = str(
datetime.datetime
.strptime(order.datetime,
                              '%Y-%m-%d %H:%M:%S') + datetime.timedelta(days=1)
)
order.date = exact_time[0:10]
order.datetime = '{} 09:30:00'.format(order.date)
elif order.frequence in [FREQUENCE.ONE_MIN,
FREQUENCE.FIVE_MIN,
FREQUENCE.FIFTEEN_MIN,
FREQUENCE.THIRTY_MIN,
FREQUENCE.SIXTY_MIN]:
exact_time = str(
datetime.datetime
.strptime(order.datetime,
                              '%Y-%m-%d %H:%M:%S') +
                    datetime.timedelta(minutes=1)
)
order.date = exact_time[0:10]
order.datetime = exact_time
self.market_data = self.get_market(order)
if self.market_data is None:
return order
if order.towards == 1:
order.price = float(self.market_data["high"])
else:
order.price = float(self.market_data["low"])
return order | def function[warp, parameter[self, order]]:
    constant[Wrapper for the order/market interaction
[description]
Arguments:
order {[type]} -- [description]
Returns:
[type] -- [description]
]
if compare[name[order].order_model equal[==] name[ORDER_MODEL].MARKET] begin[:]
if compare[name[order].frequence is name[FREQUENCE].DAY] begin[:]
name[order].date assign[=] call[name[order].datetime][<ast.Slice object at 0x7da1b1e7b070>]
name[order].datetime assign[=] call[constant[{} 09:30:00].format, parameter[name[order].date]]
name[self].market_data assign[=] call[name[self].get_market, parameter[name[order]]]
if compare[name[self].market_data is constant[None]] begin[:]
return[name[order]]
name[order].price assign[=] binary_operation[binary_operation[call[name[float], parameter[call[name[self].market_data][constant[high]]]] + call[name[float], parameter[call[name[self].market_data][constant[low]]]]] * constant[0.5]]
return[name[order]] | keyword[def] identifier[warp] ( identifier[self] , identifier[order] ):
literal[string]
keyword[if] identifier[order] . identifier[order_model] == identifier[ORDER_MODEL] . identifier[MARKET] :
keyword[if] identifier[order] . identifier[frequence] keyword[is] identifier[FREQUENCE] . identifier[DAY] :
identifier[order] . identifier[date] = identifier[order] . identifier[datetime] [ literal[int] : literal[int] ]
identifier[order] . identifier[datetime] = literal[string] . identifier[format] ( identifier[order] . identifier[date] )
keyword[elif] identifier[order] . identifier[frequence] keyword[in] [ identifier[FREQUENCE] . identifier[ONE_MIN] ,
identifier[FREQUENCE] . identifier[FIVE_MIN] ,
identifier[FREQUENCE] . identifier[FIFTEEN_MIN] ,
identifier[FREQUENCE] . identifier[THIRTY_MIN] ,
identifier[FREQUENCE] . identifier[SIXTY_MIN] ]:
identifier[exact_time] = identifier[str] (
identifier[datetime] . identifier[datetime]
. identifier[strptime] ( identifier[str] ( identifier[order] . identifier[datetime] ),
literal[string] )+
identifier[datetime] . identifier[timedelta] ( identifier[minutes] = literal[int] )
)
identifier[order] . identifier[date] = identifier[exact_time] [ literal[int] : literal[int] ]
identifier[order] . identifier[datetime] = identifier[exact_time]
identifier[self] . identifier[market_data] = identifier[self] . identifier[get_market] ( identifier[order] )
keyword[if] identifier[self] . identifier[market_data] keyword[is] keyword[None] :
keyword[return] identifier[order]
identifier[order] . identifier[price] =(
identifier[float] ( identifier[self] . identifier[market_data] [ literal[string] ])+
identifier[float] ( identifier[self] . identifier[market_data] [ literal[string] ])
)* literal[int]
keyword[elif] identifier[order] . identifier[order_model] == identifier[ORDER_MODEL] . identifier[NEXT_OPEN] :
keyword[try] :
identifier[exact_time] = identifier[str] (
identifier[datetime] . identifier[datetime]
. identifier[strptime] ( identifier[str] ( identifier[order] . identifier[datetime] ),
              literal[string] )+ identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
)
identifier[order] . identifier[date] = identifier[exact_time] [ literal[int] : literal[int] ]
identifier[order] . identifier[datetime] = literal[string] . identifier[format] ( identifier[order] . identifier[date] )
keyword[except] :
identifier[order] . identifier[datetime] = literal[string] . identifier[format] ( identifier[order] . identifier[date] )
identifier[self] . identifier[market_data] = identifier[self] . identifier[get_market] ( identifier[order] )
keyword[if] identifier[self] . identifier[market_data] keyword[is] keyword[None] :
keyword[return] identifier[order]
identifier[order] . identifier[price] = identifier[float] ( identifier[self] . identifier[market_data] [ literal[string] ])
keyword[elif] identifier[order] . identifier[order_model] == identifier[ORDER_MODEL] . identifier[CLOSE] :
keyword[try] :
identifier[order] . identifier[datetime] = identifier[self] . identifier[market_data] . identifier[datetime]
keyword[except] :
keyword[if] identifier[len] ( identifier[str] ( identifier[order] . identifier[datetime] ))== literal[int] :
keyword[pass]
keyword[else] :
identifier[order] . identifier[datetime] = literal[string] . identifier[format] ( identifier[order] . identifier[date] )
identifier[self] . identifier[market_data] = identifier[self] . identifier[get_market] ( identifier[order] )
keyword[if] identifier[self] . identifier[market_data] keyword[is] keyword[None] :
keyword[return] identifier[order]
identifier[order] . identifier[price] = identifier[float] ( identifier[self] . identifier[market_data] [ literal[string] ])
keyword[elif] identifier[order] . identifier[order_model] == identifier[ORDER_MODEL] . identifier[STRICT] :
literal[string]
keyword[if] identifier[order] . identifier[frequence] keyword[is] identifier[FREQUENCE] . identifier[DAY] :
identifier[exact_time] = identifier[str] (
identifier[datetime] . identifier[datetime]
. identifier[strptime] ( identifier[order] . identifier[datetime] ,
              literal[string] )+ identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
)
identifier[order] . identifier[date] = identifier[exact_time] [ literal[int] : literal[int] ]
identifier[order] . identifier[datetime] = literal[string] . identifier[format] ( identifier[order] . identifier[date] )
keyword[elif] identifier[order] . identifier[frequence] keyword[in] [ identifier[FREQUENCE] . identifier[ONE_MIN] ,
identifier[FREQUENCE] . identifier[FIVE_MIN] ,
identifier[FREQUENCE] . identifier[FIFTEEN_MIN] ,
identifier[FREQUENCE] . identifier[THIRTY_MIN] ,
identifier[FREQUENCE] . identifier[SIXTY_MIN] ]:
identifier[exact_time] = identifier[str] (
identifier[datetime] . identifier[datetime]
. identifier[strptime] ( identifier[order] . identifier[datetime] ,
literal[string] )+
              identifier[datetime] . identifier[timedelta] ( identifier[minutes] = literal[int] )
)
identifier[order] . identifier[date] = identifier[exact_time] [ literal[int] : literal[int] ]
identifier[order] . identifier[datetime] = identifier[exact_time]
identifier[self] . identifier[market_data] = identifier[self] . identifier[get_market] ( identifier[order] )
keyword[if] identifier[self] . identifier[market_data] keyword[is] keyword[None] :
keyword[return] identifier[order]
keyword[if] identifier[order] . identifier[towards] == literal[int] :
identifier[order] . identifier[price] = identifier[float] ( identifier[self] . identifier[market_data] [ literal[string] ])
keyword[else] :
identifier[order] . identifier[price] = identifier[float] ( identifier[self] . identifier[market_data] [ literal[string] ])
keyword[return] identifier[order] | def warp(self, order):
"""对order/market的封装
[description]
Arguments:
order {[type]} -- [description]
Returns:
[type] -- [description]
"""
    # adjust the time according to the execution model
if order.order_model == ORDER_MODEL.MARKET:
if order.frequence is FREQUENCE.DAY:
# exact_time = str(datetime.datetime.strptime(
# str(order.datetime), '%Y-%m-%d %H-%M-%S') + datetime.timedelta(day=1))
order.date = order.datetime[0:10]
order.datetime = '{} 09:30:00'.format(order.date) # depends on [control=['if'], data=[]]
elif order.frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
exact_time = str(datetime.datetime.strptime(str(order.datetime), '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=1))
order.date = exact_time[0:10]
order.datetime = exact_time # depends on [control=['if'], data=[]]
self.market_data = self.get_market(order)
if self.market_data is None:
return order # depends on [control=['if'], data=[]]
order.price = (float(self.market_data['high']) + float(self.market_data['low'])) * 0.5 # depends on [control=['if'], data=[]]
elif order.order_model == ORDER_MODEL.NEXT_OPEN:
try:
            exact_time = str(datetime.datetime.strptime(str(order.datetime), '%Y-%m-%d %H:%M:%S') + datetime.timedelta(days=1))
order.date = exact_time[0:10]
order.datetime = '{} 09:30:00'.format(order.date) # depends on [control=['try'], data=[]]
except:
order.datetime = '{} 15:00:00'.format(order.date) # depends on [control=['except'], data=[]]
self.market_data = self.get_market(order)
if self.market_data is None:
return order # depends on [control=['if'], data=[]]
order.price = float(self.market_data['close']) # depends on [control=['if'], data=[]]
elif order.order_model == ORDER_MODEL.CLOSE:
try:
order.datetime = self.market_data.datetime # depends on [control=['try'], data=[]]
except:
if len(str(order.datetime)) == 19:
pass # depends on [control=['if'], data=[]]
else:
order.datetime = '{} 15:00:00'.format(order.date) # depends on [control=['except'], data=[]]
self.market_data = self.get_market(order)
if self.market_data is None:
return order # depends on [control=['if'], data=[]]
order.price = float(self.market_data['close']) # depends on [control=['if'], data=[]]
elif order.order_model == ORDER_MODEL.STRICT:
        'strict mode'
if order.frequence is FREQUENCE.DAY:
            exact_time = str(datetime.datetime.strptime(order.datetime, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(days=1))
order.date = exact_time[0:10]
order.datetime = '{} 09:30:00'.format(order.date) # depends on [control=['if'], data=[]]
elif order.frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
            exact_time = str(datetime.datetime.strptime(order.datetime, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=1))
order.date = exact_time[0:10]
order.datetime = exact_time # depends on [control=['if'], data=[]]
self.market_data = self.get_market(order)
if self.market_data is None:
return order # depends on [control=['if'], data=[]]
if order.towards == 1:
order.price = float(self.market_data['high']) # depends on [control=['if'], data=[]]
else:
order.price = float(self.market_data['low']) # depends on [control=['if'], data=[]]
return order |
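The datetime handling above reduces to parse, shift, reformat; a stdlib-only sketch of the intraday branch (using the corrected days=/minutes= keywords):

import datetime

dt = datetime.datetime.strptime('2019-01-02 09:30:00', '%Y-%m-%d %H:%M:%S')
exact_time = str(dt + datetime.timedelta(minutes=1))
print(exact_time[0:10], exact_time)  # 2019-01-02 2019-01-02 09:31:00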
def powernodes_containing(self, name, directly=False) -> iter:
"""Yield all power nodes containing (power) node of given *name*.
If *directly* is True, will only yield the direct parent of given name.
"""
if directly:
yield from (node for node in self.all_in(name)
if name in self.inclusions[node])
else:
# This algorithm is very bad. Inverting the inclusion dict could
# be far better.
@functools.lru_cache(maxsize=self.node_number(count_pnode=True))
def contains_target(node, target):
succs = self.inclusions[node]
if target in succs:
return True
else:
return any(contains_target(succ, target) for succ in succs)
# populate the cache
for root in self.roots:
contains_target(root, name)
# output all that contains target at some level
yield from (node for node in self.inclusions.keys()
if contains_target(node, name)) | def function[powernodes_containing, parameter[self, name, directly]]:
constant[Yield all power nodes containing (power) node of given *name*.
If *directly* is True, will only yield the direct parent of given name.
]
if name[directly] begin[:]
<ast.YieldFrom object at 0x7da18ede6f20> | keyword[def] identifier[powernodes_containing] ( identifier[self] , identifier[name] , identifier[directly] = keyword[False] )-> identifier[iter] :
literal[string]
keyword[if] identifier[directly] :
keyword[yield] keyword[from] ( identifier[node] keyword[for] identifier[node] keyword[in] identifier[self] . identifier[all_in] ( identifier[name] )
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[inclusions] [ identifier[node] ])
keyword[else] :
@ identifier[functools] . identifier[lru_cache] ( identifier[maxsize] = identifier[self] . identifier[node_number] ( identifier[count_pnode] = keyword[True] ))
keyword[def] identifier[contains_target] ( identifier[node] , identifier[target] ):
identifier[succs] = identifier[self] . identifier[inclusions] [ identifier[node] ]
keyword[if] identifier[target] keyword[in] identifier[succs] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] identifier[any] ( identifier[contains_target] ( identifier[succ] , identifier[target] ) keyword[for] identifier[succ] keyword[in] identifier[succs] )
keyword[for] identifier[root] keyword[in] identifier[self] . identifier[roots] :
identifier[contains_target] ( identifier[root] , identifier[name] )
keyword[yield] keyword[from] ( identifier[node] keyword[for] identifier[node] keyword[in] identifier[self] . identifier[inclusions] . identifier[keys] ()
keyword[if] identifier[contains_target] ( identifier[node] , identifier[name] )) | def powernodes_containing(self, name, directly=False) -> iter:
"""Yield all power nodes containing (power) node of given *name*.
If *directly* is True, will only yield the direct parent of given name.
"""
if directly:
yield from (node for node in self.all_in(name) if name in self.inclusions[node]) # depends on [control=['if'], data=[]]
else:
# This algorithm is very bad. Inverting the inclusion dict could
# be far better.
@functools.lru_cache(maxsize=self.node_number(count_pnode=True))
def contains_target(node, target):
succs = self.inclusions[node]
if target in succs:
return True # depends on [control=['if'], data=[]]
else:
return any((contains_target(succ, target) for succ in succs))
# populate the cache
for root in self.roots:
contains_target(root, name) # depends on [control=['for'], data=['root']]
# output all that contains target at some level
yield from (node for node in self.inclusions.keys() if contains_target(node, name)) |
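A standalone sketch of the memoised containment test over a hypothetical inclusion mapping:

import functools

inclusions = {'root': ('p1',), 'p1': ('a', 'b'), 'a': (), 'b': ()}

@functools.lru_cache(maxsize=None)
def contains_target(node, target):
    succs = inclusions[node]
    return target in succs or any(contains_target(s, target) for s in succs)

print([n for n in inclusions if contains_target(n, 'a')])  # ['root', 'p1']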
def _write_str(self, data):
"""
Converts the given data then writes it
:param data: Data to be written
:return: The result of ``self.output.write()``
"""
with self.__lock:
self.output.write(
to_str(data, self.encoding)
.encode()
.decode(self.out_encoding, errors="replace")
) | def function[_write_str, parameter[self, data]]:
constant[
Converts the given data then writes it
:param data: Data to be written
:return: The result of ``self.output.write()``
]
with name[self].__lock begin[:]
call[name[self].output.write, parameter[call[call[call[name[to_str], parameter[name[data], name[self].encoding]].encode, parameter[]].decode, parameter[name[self].out_encoding]]]] | keyword[def] identifier[_write_str] ( identifier[self] , identifier[data] ):
literal[string]
keyword[with] identifier[self] . identifier[__lock] :
identifier[self] . identifier[output] . identifier[write] (
identifier[to_str] ( identifier[data] , identifier[self] . identifier[encoding] )
. identifier[encode] ()
. identifier[decode] ( identifier[self] . identifier[out_encoding] , identifier[errors] = literal[string] )
) | def _write_str(self, data):
"""
Converts the given data then writes it
:param data: Data to be written
:return: The result of ``self.output.write()``
"""
with self.__lock:
self.output.write(to_str(data, self.encoding).encode().decode(self.out_encoding, errors='replace')) # depends on [control=['with'], data=[]] |
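The errors='replace' decode is what keeps the write from raising on characters the output stream cannot represent; a quick illustration with deliberately mismatched encodings:

text = 'héllo'
# non-ASCII bytes become U+FFFD replacement characters instead of raising
print(text.encode('utf-8').decode('ascii', errors='replace'))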
def demo(host, port):
"""Basic demo of the monitoring capabilities."""
# logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
stl = AsyncSatel(host,
port,
loop,
[1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 25, 26, 27, 28, 29, 30],
[8, 9, 10]
)
loop.run_until_complete(stl.connect())
loop.create_task(stl.arm("3333", 1))
loop.create_task(stl.disarm("3333"))
loop.create_task(stl.keep_alive())
loop.create_task(stl.monitor_status())
loop.run_forever()
loop.close() | def function[demo, parameter[host, port]]:
constant[Basic demo of the monitoring capabilities.]
variable[loop] assign[=] call[name[asyncio].get_event_loop, parameter[]]
variable[stl] assign[=] call[name[AsyncSatel], parameter[name[host], name[port], name[loop], list[[<ast.Constant object at 0x7da2041d9d80>, <ast.Constant object at 0x7da2041dbb80>, <ast.Constant object at 0x7da2041d9930>, <ast.Constant object at 0x7da2041d9060>, <ast.Constant object at 0x7da2041d9a80>, <ast.Constant object at 0x7da2041d9540>, <ast.Constant object at 0x7da2041d9600>, <ast.Constant object at 0x7da2041da710>, <ast.Constant object at 0x7da2041da170>, <ast.Constant object at 0x7da2041d9a50>, <ast.Constant object at 0x7da2041db2e0>, <ast.Constant object at 0x7da2041dbd90>, <ast.Constant object at 0x7da2041da020>, <ast.Constant object at 0x7da2041d88e0>, <ast.Constant object at 0x7da2041d8910>, <ast.Constant object at 0x7da2041d8a00>, <ast.Constant object at 0x7da2041daa70>, <ast.Constant object at 0x7da2041d9510>, <ast.Constant object at 0x7da2041d90f0>, <ast.Constant object at 0x7da2041db130>, <ast.Constant object at 0x7da2041db4f0>, <ast.Constant object at 0x7da2041d98d0>, <ast.Constant object at 0x7da2041d9d50>, <ast.Constant object at 0x7da2041dbd00>, <ast.Constant object at 0x7da2041d85e0>, <ast.Constant object at 0x7da2041d89d0>]], list[[<ast.Constant object at 0x7da2041d9fc0>, <ast.Constant object at 0x7da2041d8ac0>, <ast.Constant object at 0x7da2041d8e50>]]]]
call[name[loop].run_until_complete, parameter[call[name[stl].connect, parameter[]]]]
call[name[loop].create_task, parameter[call[name[stl].arm, parameter[constant[3333], constant[1]]]]]
call[name[loop].create_task, parameter[call[name[stl].disarm, parameter[constant[3333]]]]]
call[name[loop].create_task, parameter[call[name[stl].keep_alive, parameter[]]]]
call[name[loop].create_task, parameter[call[name[stl].monitor_status, parameter[]]]]
call[name[loop].run_forever, parameter[]]
call[name[loop].close, parameter[]] | keyword[def] identifier[demo] ( identifier[host] , identifier[port] ):
literal[string]
identifier[loop] = identifier[asyncio] . identifier[get_event_loop] ()
identifier[stl] = identifier[AsyncSatel] ( identifier[host] ,
identifier[port] ,
identifier[loop] ,
[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ,
literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] , literal[int] ]
)
identifier[loop] . identifier[run_until_complete] ( identifier[stl] . identifier[connect] ())
identifier[loop] . identifier[create_task] ( identifier[stl] . identifier[arm] ( literal[string] , literal[int] ))
identifier[loop] . identifier[create_task] ( identifier[stl] . identifier[disarm] ( literal[string] ))
identifier[loop] . identifier[create_task] ( identifier[stl] . identifier[keep_alive] ())
identifier[loop] . identifier[create_task] ( identifier[stl] . identifier[monitor_status] ())
identifier[loop] . identifier[run_forever] ()
identifier[loop] . identifier[close] () | def demo(host, port):
"""Basic demo of the monitoring capabilities."""
# logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
stl = AsyncSatel(host, port, loop, [1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 25, 26, 27, 28, 29, 30], [8, 9, 10])
loop.run_until_complete(stl.connect())
loop.create_task(stl.arm('3333', 1))
loop.create_task(stl.disarm('3333'))
loop.create_task(stl.keep_alive())
loop.create_task(stl.monitor_status())
loop.run_forever()
loop.close() |
def _init():
"""Initialize the furious context and registry.
NOTE: Do not directly run this method.
"""
# If there is a context and it is initialized to this request,
# return, otherwise reinitialize the _local_context.
if (hasattr(_local_context, '_initialized') and
_local_context._initialized == os.environ.get('REQUEST_ID_HASH')):
return
# Used to track the context object stack.
_local_context.registry = []
# Used to provide easy access to the currently running Async job.
_local_context._executing_async_context = None
_local_context._executing_async = []
# So that we do not inadvertently reinitialize the local context.
_local_context._initialized = os.environ.get('REQUEST_ID_HASH')
return _local_context | def function[_init, parameter[]]:
constant[Initialize the furious context and registry.
NOTE: Do not directly run this method.
]
if <ast.BoolOp object at 0x7da1b197e950> begin[:]
return[None]
name[_local_context].registry assign[=] list[[]]
name[_local_context]._executing_async_context assign[=] constant[None]
name[_local_context]._executing_async assign[=] list[[]]
name[_local_context]._initialized assign[=] call[name[os].environ.get, parameter[constant[REQUEST_ID_HASH]]]
return[name[_local_context]] | keyword[def] identifier[_init] ():
literal[string]
keyword[if] ( identifier[hasattr] ( identifier[_local_context] , literal[string] ) keyword[and]
identifier[_local_context] . identifier[_initialized] == identifier[os] . identifier[environ] . identifier[get] ( literal[string] )):
keyword[return]
identifier[_local_context] . identifier[registry] =[]
identifier[_local_context] . identifier[_executing_async_context] = keyword[None]
identifier[_local_context] . identifier[_executing_async] =[]
identifier[_local_context] . identifier[_initialized] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[return] identifier[_local_context] | def _init():
"""Initialize the furious context and registry.
NOTE: Do not directly run this method.
"""
# If there is a context and it is initialized to this request,
# return, otherwise reinitialize the _local_context.
if hasattr(_local_context, '_initialized') and _local_context._initialized == os.environ.get('REQUEST_ID_HASH'):
return # depends on [control=['if'], data=[]]
# Used to track the context object stack.
_local_context.registry = []
# Used to provide easy access to the currently running Async job.
_local_context._executing_async_context = None
_local_context._executing_async = []
# So that we do not inadvertently reinitialize the local context.
_local_context._initialized = os.environ.get('REQUEST_ID_HASH')
return _local_context |
def addVar(self, sig: object, name: str, sigType: VCD_SIG_TYPE, width: int,
valueFormatter: Callable[["Value"], str]):
"""
Add variable to scope
        :param sig: user specified object to keep track of VcdVarInfo in change()
        :param sigType: vcd type name
        :param valueFormatter: callable which converts a new value in change() to a vcd string
"""
vInf = self._writer._idScope.registerVariable(sig, name, self, width,
sigType, valueFormatter)
self.children[vInf.name] = vInf
self._writer._oFile.write("$var %s %d %s %s $end\n" % (
sigType, vInf.width, vInf.vcdId, vInf.name)) | def function[addVar, parameter[self, sig, name, sigType, width, valueFormatter]]:
constant[
Add variable to scope
:ivar sig: user specified object to keep track of VcdVarInfo in change()
:ivar sigType: vcd type name
:ivar valueFormatter: value which converts new value in change() to vcd string
]
variable[vInf] assign[=] call[name[self]._writer._idScope.registerVariable, parameter[name[sig], name[name], name[self], name[width], name[sigType], name[valueFormatter]]]
call[name[self].children][name[vInf].name] assign[=] name[vInf]
call[name[self]._writer._oFile.write, parameter[binary_operation[constant[$var %s %d %s %s $end
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c9934f0>, <ast.Attribute object at 0x7da20c992770>, <ast.Attribute object at 0x7da20c992f80>, <ast.Attribute object at 0x7da20c991570>]]]]] | keyword[def] identifier[addVar] ( identifier[self] , identifier[sig] : identifier[object] , identifier[name] : identifier[str] , identifier[sigType] : identifier[VCD_SIG_TYPE] , identifier[width] : identifier[int] ,
identifier[valueFormatter] : identifier[Callable] [[ literal[string] ], identifier[str] ]):
literal[string]
identifier[vInf] = identifier[self] . identifier[_writer] . identifier[_idScope] . identifier[registerVariable] ( identifier[sig] , identifier[name] , identifier[self] , identifier[width] ,
identifier[sigType] , identifier[valueFormatter] )
identifier[self] . identifier[children] [ identifier[vInf] . identifier[name] ]= identifier[vInf]
identifier[self] . identifier[_writer] . identifier[_oFile] . identifier[write] ( literal[string] %(
identifier[sigType] , identifier[vInf] . identifier[width] , identifier[vInf] . identifier[vcdId] , identifier[vInf] . identifier[name] )) | def addVar(self, sig: object, name: str, sigType: VCD_SIG_TYPE, width: int, valueFormatter: Callable[['Value'], str]):
"""
Add variable to scope
    :param sig: user specified object to keep track of VcdVarInfo in change()
    :param sigType: vcd type name
    :param valueFormatter: callable which converts a new value in change() to a vcd string
"""
vInf = self._writer._idScope.registerVariable(sig, name, self, width, sigType, valueFormatter)
self.children[vInf.name] = vInf
self._writer._oFile.write('$var %s %d %s %s $end\n' % (sigType, vInf.width, vInf.vcdId, vInf.name)) |
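For reference, the $var declaration emitted above follows the VCD header grammar; a sketch of the formatted line with made-up values:

sig_type, width, vcd_id, name = 'wire', 8, '!', 'data_bus'  # hypothetical values
print("$var %s %d %s %s $end" % (sig_type, width, vcd_id, name))
# $var wire 8 ! data_bus $end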
def itunessd_to_dics(itunessd):
"""
:param itunessd: the whole iTunesSD bytes data
    :return: the data translated to a tree object; see doc of dics_to_itunessd
"""
# header
header_size = get_table_size(header_table)
header_chunk = itunessd[0:header_size]
header_dic = chunk_to_dic(header_chunk, header_table)
# tracks
tracks_header_dic, tracks_offsets = get_dic_sub_numbers(itunessd, header_dic['tracks_header_offset'],
tracks_header_table)
tracks_dics = []
for track_offset in tracks_offsets:
_track_dic = chunk_to_dic(itunessd[track_offset:], track_table)
track_dic = get_custom_fields_dic(_track_dic, track_table)
tracks_dics.append(track_dic)
# playlists
playlists_header_dic, playlists_offsets = get_dic_sub_numbers(itunessd, header_dic['playlists_header_offset'],
playlists_header_table)
playlists_dics_and_indexes = []
for playlist_offset in playlists_offsets:
_playlist_header_dic, indexes_of_tracks = get_dic_sub_numbers(itunessd, playlist_offset, playlist_header_table)
playlist_header_dic = get_custom_fields_dic(_playlist_header_dic, playlist_header_table)
playlists_dics_and_indexes.append((playlist_header_dic, indexes_of_tracks))
return get_custom_fields_dic(header_dic, header_table), tracks_dics, playlists_dics_and_indexes | def function[itunessd_to_dics, parameter[itunessd]]:
constant[
:param itunessd: the whole iTunesSD bytes data
    :return: the data translated to a tree object; see doc of dics_to_itunessd
]
variable[header_size] assign[=] call[name[get_table_size], parameter[name[header_table]]]
variable[header_chunk] assign[=] call[name[itunessd]][<ast.Slice object at 0x7da1b1131f60>]
variable[header_dic] assign[=] call[name[chunk_to_dic], parameter[name[header_chunk], name[header_table]]]
<ast.Tuple object at 0x7da1b1130490> assign[=] call[name[get_dic_sub_numbers], parameter[name[itunessd], call[name[header_dic]][constant[tracks_header_offset]], name[tracks_header_table]]]
variable[tracks_dics] assign[=] list[[]]
for taget[name[track_offset]] in starred[name[tracks_offsets]] begin[:]
variable[_track_dic] assign[=] call[name[chunk_to_dic], parameter[call[name[itunessd]][<ast.Slice object at 0x7da1b11a7f40>], name[track_table]]]
variable[track_dic] assign[=] call[name[get_custom_fields_dic], parameter[name[_track_dic], name[track_table]]]
call[name[tracks_dics].append, parameter[name[track_dic]]]
<ast.Tuple object at 0x7da1b1142860> assign[=] call[name[get_dic_sub_numbers], parameter[name[itunessd], call[name[header_dic]][constant[playlists_header_offset]], name[playlists_header_table]]]
variable[playlists_dics_and_indexes] assign[=] list[[]]
for taget[name[playlist_offset]] in starred[name[playlists_offsets]] begin[:]
<ast.Tuple object at 0x7da1b1142170> assign[=] call[name[get_dic_sub_numbers], parameter[name[itunessd], name[playlist_offset], name[playlist_header_table]]]
variable[playlist_header_dic] assign[=] call[name[get_custom_fields_dic], parameter[name[_playlist_header_dic], name[playlist_header_table]]]
call[name[playlists_dics_and_indexes].append, parameter[tuple[[<ast.Name object at 0x7da1b103a5f0>, <ast.Name object at 0x7da1b1039f30>]]]]
return[tuple[[<ast.Call object at 0x7da1b103b310>, <ast.Name object at 0x7da1b103aad0>, <ast.Name object at 0x7da1b103a140>]]] | keyword[def] identifier[itunessd_to_dics] ( identifier[itunessd] ):
literal[string]
identifier[header_size] = identifier[get_table_size] ( identifier[header_table] )
identifier[header_chunk] = identifier[itunessd] [ literal[int] : identifier[header_size] ]
identifier[header_dic] = identifier[chunk_to_dic] ( identifier[header_chunk] , identifier[header_table] )
identifier[tracks_header_dic] , identifier[tracks_offsets] = identifier[get_dic_sub_numbers] ( identifier[itunessd] , identifier[header_dic] [ literal[string] ],
identifier[tracks_header_table] )
identifier[tracks_dics] =[]
keyword[for] identifier[track_offset] keyword[in] identifier[tracks_offsets] :
identifier[_track_dic] = identifier[chunk_to_dic] ( identifier[itunessd] [ identifier[track_offset] :], identifier[track_table] )
identifier[track_dic] = identifier[get_custom_fields_dic] ( identifier[_track_dic] , identifier[track_table] )
identifier[tracks_dics] . identifier[append] ( identifier[track_dic] )
identifier[playlists_header_dic] , identifier[playlists_offsets] = identifier[get_dic_sub_numbers] ( identifier[itunessd] , identifier[header_dic] [ literal[string] ],
identifier[playlists_header_table] )
identifier[playlists_dics_and_indexes] =[]
keyword[for] identifier[playlist_offset] keyword[in] identifier[playlists_offsets] :
identifier[_playlist_header_dic] , identifier[indexes_of_tracks] = identifier[get_dic_sub_numbers] ( identifier[itunessd] , identifier[playlist_offset] , identifier[playlist_header_table] )
identifier[playlist_header_dic] = identifier[get_custom_fields_dic] ( identifier[_playlist_header_dic] , identifier[playlist_header_table] )
identifier[playlists_dics_and_indexes] . identifier[append] (( identifier[playlist_header_dic] , identifier[indexes_of_tracks] ))
keyword[return] identifier[get_custom_fields_dic] ( identifier[header_dic] , identifier[header_table] ), identifier[tracks_dics] , identifier[playlists_dics_and_indexes] | def itunessd_to_dics(itunessd):
"""
:param itunessd: the whole iTunesSD bytes data
:return: the parsed tree structure; see the doc of dics_to_itunessd
"""
# header
header_size = get_table_size(header_table)
header_chunk = itunessd[0:header_size]
header_dic = chunk_to_dic(header_chunk, header_table)
# tracks
(tracks_header_dic, tracks_offsets) = get_dic_sub_numbers(itunessd, header_dic['tracks_header_offset'], tracks_header_table)
tracks_dics = []
for track_offset in tracks_offsets:
_track_dic = chunk_to_dic(itunessd[track_offset:], track_table)
track_dic = get_custom_fields_dic(_track_dic, track_table)
tracks_dics.append(track_dic) # depends on [control=['for'], data=['track_offset']]
# playlists
(playlists_header_dic, playlists_offsets) = get_dic_sub_numbers(itunessd, header_dic['playlists_header_offset'], playlists_header_table)
playlists_dics_and_indexes = []
for playlist_offset in playlists_offsets:
(_playlist_header_dic, indexes_of_tracks) = get_dic_sub_numbers(itunessd, playlist_offset, playlist_header_table)
playlist_header_dic = get_custom_fields_dic(_playlist_header_dic, playlist_header_table)
playlists_dics_and_indexes.append((playlist_header_dic, indexes_of_tracks)) # depends on [control=['for'], data=['playlist_offset']]
return (get_custom_fields_dic(header_dic, header_table), tracks_dics, playlists_dics_and_indexes) |
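A usage sketch, assuming the table helpers above are importable and an iPod `iTunesSD` file exists at the (hypothetical) path below:

```python
# Parse an iTunesSD database into header/tracks/playlists dicts.
with open('iPod_Control/iTunes/iTunesSD', 'rb') as f:  # hypothetical path
    itunessd = f.read()

header, tracks, playlists = itunessd_to_dics(itunessd)
print(header)        # dict of the custom header fields
print(len(tracks))   # one dict per track
for playlist_header, track_indexes in playlists:
    print(playlist_header, track_indexes)  # indexes point into `tracks`
```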
def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"""
Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False
"""
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if errors == 'ignore':
return Index(result, name=name)
else:
return DatetimeIndex(result, name=name)
return result.values | def function[_convert_and_box_cache, parameter[arg, cache_array, box, errors, name]]:
constant[
Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False
]
from relative_module[pandas] import module[Series], module[DatetimeIndex], module[Index]
variable[result] assign[=] call[call[name[Series], parameter[name[arg]]].map, parameter[name[cache_array]]]
if name[box] begin[:]
if compare[name[errors] equal[==] constant[ignore]] begin[:]
return[call[name[Index], parameter[name[result]]]]
return[name[result].values] | keyword[def] identifier[_convert_and_box_cache] ( identifier[arg] , identifier[cache_array] , identifier[box] , identifier[errors] , identifier[name] = keyword[None] ):
literal[string]
keyword[from] identifier[pandas] keyword[import] identifier[Series] , identifier[DatetimeIndex] , identifier[Index]
identifier[result] = identifier[Series] ( identifier[arg] ). identifier[map] ( identifier[cache_array] )
keyword[if] identifier[box] :
keyword[if] identifier[errors] == literal[string] :
keyword[return] identifier[Index] ( identifier[result] , identifier[name] = identifier[name] )
keyword[else] :
keyword[return] identifier[DatetimeIndex] ( identifier[result] , identifier[name] = identifier[name] )
keyword[return] identifier[result] . identifier[values] | def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"""
Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False
"""
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if errors == 'ignore':
return Index(result, name=name) # depends on [control=['if'], data=[]]
else:
return DatetimeIndex(result, name=name) # depends on [control=['if'], data=[]]
return result.values |
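A sketch of how the cache is built and consumed; inside pandas the cache comes from a helper such as `_maybe_cache`, but its shape is simply the unique raw values mapped to their parsed datetimes (names here are illustrative):

```python
import pandas as pd

raw = ['2019-01-01', '2019-01-02', '2019-01-01', '2019-01-02']  # many repeats
unique_vals = pd.Index(raw).unique()
cache_array = pd.Series(pd.to_datetime(unique_vals), index=unique_vals)

# box=True -> DatetimeIndex (errors='ignore' would give a plain Index);
# box=False -> ndarray of datetime64 values.
idx = _convert_and_box_cache(raw, cache_array, box=True, errors='raise', name='dates')
```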
def lookup_token(self, token=None, accessor=False, wrap_ttl=None):
"""GET /auth/token/lookup/<token>
GET /auth/token/lookup-accessor/<token-accessor>
GET /auth/token/lookup-self
:param token:
:type token: str.
:param accessor:
:type accessor: bool.
:param wrap_ttl:
:type wrap_ttl: int.
:return:
:rtype:
"""
token_param = {
'token': token,
}
accessor_param = {
'accessor': token,
}
if token:
if accessor:
path = '/v1/auth/token/lookup-accessor'
return self._adapter.post(path, json=accessor_param, wrap_ttl=wrap_ttl).json()
else:
path = '/v1/auth/token/lookup'
return self._adapter.post(path, json=token_param).json()
else:
path = '/v1/auth/token/lookup-self'
return self._adapter.get(path, wrap_ttl=wrap_ttl).json() | def function[lookup_token, parameter[self, token, accessor, wrap_ttl]]:
constant[GET /auth/token/lookup/<token>
GET /auth/token/lookup-accessor/<token-accessor>
GET /auth/token/lookup-self
:param token:
:type token: str.
:param accessor:
:type accessor: str.
:param wrap_ttl:
:type wrap_ttl: int.
:return:
:rtype:
]
variable[token_param] assign[=] dictionary[[<ast.Constant object at 0x7da1b235a170>], [<ast.Name object at 0x7da1b2358700>]]
variable[accessor_param] assign[=] dictionary[[<ast.Constant object at 0x7da1b2358d30>], [<ast.Name object at 0x7da1b23592a0>]]
if name[token] begin[:]
if name[accessor] begin[:]
variable[path] assign[=] constant[/v1/auth/token/lookup-accessor]
return[call[call[name[self]._adapter.post, parameter[name[path]]].json, parameter[]]] | keyword[def] identifier[lookup_token] ( identifier[self] , identifier[token] = keyword[None] , identifier[accessor] = keyword[False] , identifier[wrap_ttl] = keyword[None] ):
literal[string]
identifier[token_param] ={
literal[string] : identifier[token] ,
}
identifier[accessor_param] ={
literal[string] : identifier[token] ,
}
keyword[if] identifier[token] :
keyword[if] identifier[accessor] :
identifier[path] = literal[string]
keyword[return] identifier[self] . identifier[_adapter] . identifier[post] ( identifier[path] , identifier[json] = identifier[accessor_param] , identifier[wrap_ttl] = identifier[wrap_ttl] ). identifier[json] ()
keyword[else] :
identifier[path] = literal[string]
keyword[return] identifier[self] . identifier[_adapter] . identifier[post] ( identifier[path] , identifier[json] = identifier[token_param] ). identifier[json] ()
keyword[else] :
identifier[path] = literal[string]
keyword[return] identifier[self] . identifier[_adapter] . identifier[get] ( identifier[path] , identifier[wrap_ttl] = identifier[wrap_ttl] ). identifier[json] () | def lookup_token(self, token=None, accessor=False, wrap_ttl=None):
"""GET /auth/token/lookup/<token>
GET /auth/token/lookup-accessor/<token-accessor>
GET /auth/token/lookup-self
:param token:
:type token: str.
:param accessor:
:type accessor: bool.
:param wrap_ttl:
:type wrap_ttl: int.
:return:
:rtype:
"""
token_param = {'token': token}
accessor_param = {'accessor': token}
if token:
if accessor:
path = '/v1/auth/token/lookup-accessor'
return self._adapter.post(path, json=accessor_param, wrap_ttl=wrap_ttl).json() # depends on [control=['if'], data=[]]
else:
path = '/v1/auth/token/lookup'
return self._adapter.post(path, json=token_param).json() # depends on [control=['if'], data=[]]
else:
path = '/v1/auth/token/lookup-self'
return self._adapter.get(path, wrap_ttl=wrap_ttl).json() |
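A usage sketch against an hvac-style `Client` instance (the client object and token strings are hypothetical):

```python
client.lookup_token()                                       # lookup-self: the client's own token
client.lookup_token(token='s.AAAA1111BBBB2222')             # lookup an explicit token
client.lookup_token(token='accessor-value', accessor=True)  # lookup by accessor
```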
def get_buildroot(self, worker_metadatas):
"""
Build the buildroot entry of the metadata.
:return: list, containing dicts of partial metadata
"""
buildroots = []
for platform in sorted(worker_metadatas.keys()):
for instance in worker_metadatas[platform]['buildroots']:
instance['id'] = '{}-{}'.format(platform, instance['id'])
buildroots.append(instance)
return buildroots | def function[get_buildroot, parameter[self, worker_metadatas]]:
constant[
Build the buildroot entry of the metadata.
:return: list, containing dicts of partial metadata
]
variable[buildroots] assign[=] list[[]]
for taget[name[platform]] in starred[call[name[sorted], parameter[call[name[worker_metadatas].keys, parameter[]]]]] begin[:]
for taget[name[instance]] in starred[call[call[name[worker_metadatas]][name[platform]]][constant[buildroots]]] begin[:]
call[name[instance]][constant[id]] assign[=] call[constant[{}-{}].format, parameter[name[platform], call[name[instance]][constant[id]]]]
call[name[buildroots].append, parameter[name[instance]]]
return[name[buildroots]] | keyword[def] identifier[get_buildroot] ( identifier[self] , identifier[worker_metadatas] ):
literal[string]
identifier[buildroots] =[]
keyword[for] identifier[platform] keyword[in] identifier[sorted] ( identifier[worker_metadatas] . identifier[keys] ()):
keyword[for] identifier[instance] keyword[in] identifier[worker_metadatas] [ identifier[platform] ][ literal[string] ]:
identifier[instance] [ literal[string] ]= literal[string] . identifier[format] ( identifier[platform] , identifier[instance] [ literal[string] ])
identifier[buildroots] . identifier[append] ( identifier[instance] )
keyword[return] identifier[buildroots] | def get_buildroot(self, worker_metadatas):
"""
Build the buildroot entry of the metadata.
:return: list, containing dicts of partial metadata
"""
buildroots = []
for platform in sorted(worker_metadatas.keys()):
for instance in worker_metadatas[platform]['buildroots']:
instance['id'] = '{}-{}'.format(platform, instance['id'])
buildroots.append(instance) # depends on [control=['for'], data=['instance']] # depends on [control=['for'], data=['platform']]
return buildroots |
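The per-platform id prefixing is easiest to see on a small input; a sketch of the expected shape (field values hypothetical, `plugin` stands in for the instance this method lives on):

```python
worker_metadatas = {
    'x86_64':  {'buildroots': [{'id': 1, 'host': {'arch': 'x86_64'}}]},
    'ppc64le': {'buildroots': [{'id': 1, 'host': {'arch': 'ppc64le'}}]},
}
buildroots = plugin.get_buildroot(worker_metadatas)
# Platforms are visited in sorted order and ids become platform-qualified:
# [{'id': 'ppc64le-1', ...}, {'id': 'x86_64-1', ...}]
```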
def get_locations_list(self, lower_bound=0, upper_bound=None):
"""
Return a slice of the internal location list.
Args:
lower_bound: index of the first location to return (defaults to 0).
upper_bound: index one past the last location to return; defaults to the total number of sub-locations.
Returns:
The locations in [lower_bound, upper_bound), or an empty list on error.
"""
real_upper_bound = upper_bound
if upper_bound is None:
real_upper_bound = self.nbr_of_sub_locations()
try:
return self._locations_list[lower_bound:real_upper_bound]
except Exception:
return list() | def function[get_locations_list, parameter[self, lower_bound, upper_bound]]:
constant[
Return the internal location list.
Args:
lower_bound:
upper_bound:
Returns:
]
variable[real_upper_bound] assign[=] name[upper_bound]
if compare[name[upper_bound] is constant[None]] begin[:]
variable[real_upper_bound] assign[=] call[name[self].nbr_of_sub_locations, parameter[]]
<ast.Try object at 0x7da2054a6110> | keyword[def] identifier[get_locations_list] ( identifier[self] , identifier[lower_bound] = literal[int] , identifier[upper_bound] = keyword[None] ):
literal[string]
identifier[real_upper_bound] = identifier[upper_bound]
keyword[if] identifier[upper_bound] keyword[is] keyword[None] :
identifier[real_upper_bound] = identifier[self] . identifier[nbr_of_sub_locations] ()
keyword[try] :
keyword[return] identifier[self] . identifier[_locations_list] [ identifier[lower_bound] : identifier[real_upper_bound] ]
keyword[except] :
keyword[return] identifier[list] () | def get_locations_list(self, lower_bound=0, upper_bound=None):
"""
Return a slice of the internal location list.
Args:
lower_bound: index of the first location to return (defaults to 0).
upper_bound: index one past the last location to return; defaults to the total number of sub-locations.
Returns:
The locations in [lower_bound, upper_bound), or an empty list on error.
"""
real_upper_bound = upper_bound
if upper_bound is None:
real_upper_bound = self.nbr_of_sub_locations() # depends on [control=['if'], data=[]]
try:
return self._locations_list[lower_bound:real_upper_bound] # depends on [control=['try'], data=[]]
except:
return list() # depends on [control=['except'], data=[]] |
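A quick sketch of the slicing behaviour, assuming a hypothetical `loc` object with five sub-locations:

```python
loc.get_locations_list()       # all five sub-locations
loc.get_locations_list(2)      # sub-locations 2..4
loc.get_locations_list(1, 3)   # sub-locations 1 and 2
loc.get_locations_list('bad')  # non-integer bound -> caught, returns []
```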
def poisson(x,
layer_fn=tf.compat.v1.layers.dense,
log_rate_fn=lambda x: x,
name=None):
"""Constructs a trainable `tfd.Poisson` distribution.
This function creates a Poisson distribution parameterized by log rate.
Using default args, this function is mathematically equivalent to:
```none
Y = Poisson(log_rate=matmul(W, x) + b)
where,
W in R^[d, n]
b in R^d
```
#### Examples
This can be used as a [Poisson regression](
https://en.wikipedia.org/wiki/Poisson_regression) loss.
```python
# This example fits a poisson regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size).astype(dtype)
b = np.random.randn(1).astype(dtype)
true_log_rate = np.tensordot(x, w, axes=[[-1], [-1]]) + b
y = np.random.poisson(lam=np.exp(true_log_rate)).astype(dtype)
return y, x
y, x = make_training_data()
# Build TF graph for fitting Poisson maximum likelihood estimator.
poisson = tfp.trainable_distributions.poisson(x)
loss = -tf.reduce_mean(poisson.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, poisson.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in range(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:37.0814208984 mse:6359.41259766
# iteration:200 loss:1.42010736465 mse:40.7654914856
# iteration:400 loss:1.39027583599 mse:8.77660560608
# iteration:600 loss:1.3902695179 mse:8.78443241119
# iteration:800 loss:1.39026939869 mse:8.78443622589
# iteration:999 loss:1.39026939869 mse:8.78444766998
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
Default value: `tf.layers.dense`.
log_rate_fn: Python `callable` which transforms the `log_rate` parameter.
Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same
shape and `dtype`.
Default value: `lambda x: x`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "poisson").
Returns:
poisson: An instance of `tfd.Poisson`.
"""
with tf.compat.v1.name_scope(name, 'poisson', [x]):
x = tf.convert_to_tensor(value=x, name='x')
log_rate = log_rate_fn(tf.squeeze(layer_fn(x, 1), axis=-1))
return tfd.Poisson(log_rate=log_rate) | def function[poisson, parameter[x, layer_fn, log_rate_fn, name]]:
constant[Constructs a trainable `tfd.Poisson` distribution.
This function creates a Poisson distribution parameterized by log rate.
Using default args, this function is mathematically equivalent to:
```none
Y = Poisson(log_rate=matmul(W, x) + b)
where,
W in R^[d, n]
b in R^d
```
#### Examples
This can be used as a [Poisson regression](
https://en.wikipedia.org/wiki/Poisson_regression) loss.
```python
# This example fits a poisson regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size).astype(dtype)
b = np.random.randn(1).astype(dtype)
true_log_rate = np.tensordot(x, w, axes=[[-1], [-1]]) + b
y = np.random.poisson(lam=np.exp(true_log_rate)).astype(dtype)
return y, x
y, x = make_training_data()
# Build TF graph for fitting Poisson maximum likelihood estimator.
poisson = tfp.trainable_distributions.poisson(x)
loss = -tf.reduce_mean(poisson.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, poisson.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in xrange(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:37.0814208984 mse:6359.41259766
# iteration:200 loss:1.42010736465 mse:40.7654914856
# iteration:400 loss:1.39027583599 mse:8.77660560608
# iteration:600 loss:1.3902695179 mse:8.78443241119
# iteration:800 loss:1.39026939869 mse:8.78443622589
# iteration:999 loss:1.39026939869 mse:8.78444766998
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
Default value: `tf.layers.dense`.
log_rate_fn: Python `callable` which transforms the `log_rate` parameter.
Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same
shape and `dtype`.
Default value: `lambda x: x`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "poisson").
Returns:
poisson: An instance of `tfd.Poisson`.
]
with call[name[tf].compat.v1.name_scope, parameter[name[name], constant[poisson], list[[<ast.Name object at 0x7da1b02c85b0>]]]] begin[:]
variable[x] assign[=] call[name[tf].convert_to_tensor, parameter[]]
variable[log_rate] assign[=] call[name[log_rate_fn], parameter[call[name[tf].squeeze, parameter[call[name[layer_fn], parameter[name[x], constant[1]]]]]]]
return[call[name[tfd].Poisson, parameter[]]] | keyword[def] identifier[poisson] ( identifier[x] ,
identifier[layer_fn] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[layers] . identifier[dense] ,
identifier[log_rate_fn] = keyword[lambda] identifier[x] : identifier[x] ,
identifier[name] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[compat] . identifier[v1] . identifier[name_scope] ( identifier[name] , literal[string] ,[ identifier[x] ]):
identifier[x] = identifier[tf] . identifier[convert_to_tensor] ( identifier[value] = identifier[x] , identifier[name] = literal[string] )
identifier[log_rate] = identifier[log_rate_fn] ( identifier[tf] . identifier[squeeze] ( identifier[layer_fn] ( identifier[x] , literal[int] ), identifier[axis] =- literal[int] ))
keyword[return] identifier[tfd] . identifier[Poisson] ( identifier[log_rate] = identifier[log_rate] ) | def poisson(x, layer_fn=tf.compat.v1.layers.dense, log_rate_fn=lambda x: x, name=None):
"""Constructs a trainable `tfd.Poisson` distribution.
This function creates a Poisson distribution parameterized by log rate.
Using default args, this function is mathematically equivalent to:
```none
Y = Poisson(log_rate=matmul(W, x) + b)
where,
W in R^[d, n]
b in R^d
```
#### Examples
This can be used as a [Poisson regression](
https://en.wikipedia.org/wiki/Poisson_regression) loss.
```python
# This example fits a poisson regression loss.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Create fictitious training data.
dtype = np.float32
n = 3000 # number of samples
x_size = 4 # size of single x
def make_training_data():
np.random.seed(142)
x = np.random.randn(n, x_size).astype(dtype)
w = np.random.randn(x_size).astype(dtype)
b = np.random.randn(1).astype(dtype)
true_log_rate = np.tensordot(x, w, axes=[[-1], [-1]]) + b
y = np.random.poisson(lam=np.exp(true_log_rate)).astype(dtype)
return y, x
y, x = make_training_data()
# Build TF graph for fitting Poisson maximum likelihood estimator.
poisson = tfp.trainable_distributions.poisson(x)
loss = -tf.reduce_mean(poisson.log_prob(y))
train_op = tf.train.AdamOptimizer(learning_rate=2.**-5).minimize(loss)
mse = tf.reduce_mean(tf.squared_difference(y, poisson.mean()))
init_op = tf.global_variables_initializer()
# Run graph 1000 times.
num_steps = 1000
loss_ = np.zeros(num_steps) # Style: `_` to indicate sess.run result.
mse_ = np.zeros(num_steps)
with tf.Session() as sess:
sess.run(init_op)
for it in range(loss_.size):
_, loss_[it], mse_[it] = sess.run([train_op, loss, mse])
if it % 200 == 0 or it == loss_.size - 1:
print("iteration:{} loss:{} mse:{}".format(it, loss_[it], mse_[it]))
# ==> iteration:0 loss:37.0814208984 mse:6359.41259766
# iteration:200 loss:1.42010736465 mse:40.7654914856
# iteration:400 loss:1.39027583599 mse:8.77660560608
# iteration:600 loss:1.3902695179 mse:8.78443241119
# iteration:800 loss:1.39026939869 mse:8.78443622589
# iteration:999 loss:1.39026939869 mse:8.78444766998
```
Args:
x: `Tensor` with floating type. Must have statically defined rank and
statically known right-most dimension.
layer_fn: Python `callable` which takes input `x` and `int` scalar `d` and
returns a transformation of `x` with shape
`tf.concat([tf.shape(x)[:-1], [1]], axis=0)`.
Default value: `tf.layers.dense`.
log_rate_fn: Python `callable` which transforms the `log_rate` parameter.
Takes a (batch of) length-`dims` vectors and returns a `Tensor` of same
shape and `dtype`.
Default value: `lambda x: x`.
name: A `name_scope` name for operations created by this function.
Default value: `None` (i.e., "poisson").
Returns:
poisson: An instance of `tfd.Poisson`.
"""
with tf.compat.v1.name_scope(name, 'poisson', [x]):
x = tf.convert_to_tensor(value=x, name='x')
log_rate = log_rate_fn(tf.squeeze(layer_fn(x, 1), axis=-1))
return tfd.Poisson(log_rate=log_rate) # depends on [control=['with'], data=[]] |
def cftime_to_timestamp(date, time_unit='us'):
"""Converts cftime to timestamp since epoch in milliseconds
Non-standard calendars (e.g. Julian or no leap calendars)
are converted to standard Gregorian calendar. This can cause
extra space to be added for dates that don't exist in the original
calendar. In order to handle these dates correctly a custom bokeh
model with support for other calendars would have to be defined.
Args:
date: cftime datetime object (or array)
Returns:
timestamp expressed in time_unit since 1970-01-01 00:00:00
"""
import cftime
utime = cftime.utime('microseconds since 1970-01-01 00:00:00')
if time_unit == 'us':
tscale = 1
else:
tscale = (np.timedelta64(1, 'us')/np.timedelta64(1, time_unit))
return utime.date2num(date)*tscale | def function[cftime_to_timestamp, parameter[date, time_unit]]:
constant[Converts cftime to timestamp since epoch in milliseconds
Non-standard calendars (e.g. Julian or no leap calendars)
are converted to standard Gregorian calendar. This can cause
extra space to be added for dates that don't exist in the original
calendar. In order to handle these dates correctly a custom bokeh
model with support for other calendars would have to be defined.
Args:
date: cftime datetime object (or array)
Returns:
time_unit since 1970-01-01 00:00:00
]
import module[cftime]
variable[utime] assign[=] call[name[cftime].utime, parameter[constant[microseconds since 1970-01-01 00:00:00]]]
if compare[name[time_unit] equal[==] constant[us]] begin[:]
variable[tscale] assign[=] constant[1]
return[binary_operation[call[name[utime].date2num, parameter[name[date]]] * name[tscale]]] | keyword[def] identifier[cftime_to_timestamp] ( identifier[date] , identifier[time_unit] = literal[string] ):
literal[string]
keyword[import] identifier[cftime]
identifier[utime] = identifier[cftime] . identifier[utime] ( literal[string] )
keyword[if] identifier[time_unit] == literal[string] :
identifier[tscale] = literal[int]
keyword[else] :
identifier[tscale] =( identifier[np] . identifier[timedelta64] ( literal[int] , literal[string] )/ identifier[np] . identifier[timedelta64] ( literal[int] , identifier[time_unit] ))
keyword[return] identifier[utime] . identifier[date2num] ( identifier[date] )* identifier[tscale] | def cftime_to_timestamp(date, time_unit='us'):
"""Converts cftime to timestamp since epoch in milliseconds
Non-standard calendars (e.g. Julian or no leap calendars)
are converted to standard Gregorian calendar. This can cause
extra space to be added for dates that don't exist in the original
calendar. In order to handle these dates correctly a custom bokeh
model with support for other calendars would have to be defined.
Args:
date: cftime datetime object (or array)
Returns:
timestamp expressed in time_unit since 1970-01-01 00:00:00
"""
import cftime
utime = cftime.utime('microseconds since 1970-01-01 00:00:00')
if time_unit == 'us':
tscale = 1 # depends on [control=['if'], data=[]]
else:
tscale = np.timedelta64(1, 'us') / np.timedelta64(1, time_unit)
return utime.date2num(date) * tscale |
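A usage sketch; note that `cftime.utime` only exists in older cftime releases (it was removed around cftime 1.1), so this mirrors the API the function above assumes:

```python
import cftime

d = cftime.DatetimeNoLeap(2001, 3, 1)        # date in a no-leap calendar
us = cftime_to_timestamp(d)                   # microseconds since the epoch
ms = cftime_to_timestamp(d, time_unit='ms')   # same instant in milliseconds
```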
def _psi_n(x, n, b):
"""
Compute the n-th term in the infinite sum of
the Jacobi density.
"""
return 2**(b-1) / gamma(b) * (-1)**n * \
np.exp(gammaln(n+b) -
gammaln(n+1) +
np.log(2*n+b) -
0.5 * np.log(2*np.pi*x**3) -
(2*n+b)**2 / (8.*x)) | def function[_psi_n, parameter[x, n, b]]:
constant[
Compute the n-th term in the infinite sum of
the Jacobi density.
]
return[binary_operation[binary_operation[binary_operation[binary_operation[constant[2] ** binary_operation[name[b] - constant[1]]] / call[name[gamma], parameter[name[b]]]] * binary_operation[<ast.UnaryOp object at 0x7da1b1a134f0> ** name[n]]] * call[name[np].exp, parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[gammaln], parameter[binary_operation[name[n] + name[b]]]] - call[name[gammaln], parameter[binary_operation[name[n] + constant[1]]]]] + call[name[np].log, parameter[binary_operation[binary_operation[constant[2] * name[n]] + name[b]]]]] - binary_operation[constant[0.5] * call[name[np].log, parameter[binary_operation[binary_operation[constant[2] * name[np].pi] * binary_operation[name[x] ** constant[3]]]]]]] - binary_operation[binary_operation[binary_operation[binary_operation[constant[2] * name[n]] + name[b]] ** constant[2]] / binary_operation[constant[8.0] * name[x]]]]]]]] | keyword[def] identifier[_psi_n] ( identifier[x] , identifier[n] , identifier[b] ):
literal[string]
keyword[return] literal[int] **( identifier[b] - literal[int] )/ identifier[gamma] ( identifier[b] )*(- literal[int] )** identifier[n] * identifier[np] . identifier[exp] ( identifier[gammaln] ( identifier[n] + identifier[b] )-
identifier[gammaln] ( identifier[n] + literal[int] )+
identifier[np] . identifier[log] ( literal[int] * identifier[n] + identifier[b] )-
literal[int] * identifier[np] . identifier[log] ( literal[int] * identifier[np] . identifier[pi] * identifier[x] ** literal[int] )-
( literal[int] * identifier[n] + identifier[b] )** literal[int] /( literal[int] * identifier[x] )) | def _psi_n(x, n, b):
"""
Compute the n-th term in the infinite sum of
the Jacobi density.
"""
return 2 ** (b - 1) / gamma(b) * (-1) ** n * np.exp(gammaln(n + b) - gammaln(n + 1) + np.log(2 * n + b) - 0.5 * np.log(2 * np.pi * x ** 3) - (2 * n + b) ** 2 / (8.0 * x)) |
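Written out, the term computed above (after exponentiating the log-space sum) is:

```latex
\psi_n(x) = \frac{2^{b-1}}{\Gamma(b)}\,(-1)^n\,
            \frac{\Gamma(n+b)}{\Gamma(n+1)}\,
            \frac{2n+b}{\sqrt{2\pi x^{3}}}\,
            \exp\!\left(-\frac{(2n+b)^{2}}{8x}\right)
```

Evaluating the middle factors in log space via `gammaln` avoids overflowing Γ(n+b) for large n.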
def _build_proxy_contract_creation_constructor(self,
master_copy: str,
initializer: bytes,
funder: str,
payment_token: str,
payment: int) -> ContractConstructor:
"""
:param master_copy: Master Copy of Gnosis Safe already deployed
:param initializer: Data initializer to send to GnosisSafe setup method
:param funder: Address that should get the payment (if payment set)
:param payment_token: address of the token used for payment; if not set (0x0), payment is made in ether
:param payment: Payment
:return: Transaction dictionary
"""
if not funder or funder == NULL_ADDRESS:
funder = NULL_ADDRESS
payment = 0
return get_paying_proxy_contract(self.w3).constructor(
master_copy,
initializer,
funder,
payment_token,
payment) | def function[_build_proxy_contract_creation_constructor, parameter[self, master_copy, initializer, funder, payment_token, payment]]:
constant[
:param master_copy: Master Copy of Gnosis Safe already deployed
:param initializer: Data initializer to send to GnosisSafe setup method
:param funder: Address that should get the payment (if payment set)
:param payment_token: Address if a token is used. If not set, 0x0 will be ether
:param payment: Payment
:return: Transaction dictionary
]
if <ast.BoolOp object at 0x7da18dc07ac0> begin[:]
variable[funder] assign[=] name[NULL_ADDRESS]
variable[payment] assign[=] constant[0]
return[call[call[name[get_paying_proxy_contract], parameter[name[self].w3]].constructor, parameter[name[master_copy], name[initializer], name[funder], name[payment_token], name[payment]]]] | keyword[def] identifier[_build_proxy_contract_creation_constructor] ( identifier[self] ,
identifier[master_copy] : identifier[str] ,
identifier[initializer] : identifier[bytes] ,
identifier[funder] : identifier[str] ,
identifier[payment_token] : identifier[str] ,
identifier[payment] : identifier[int] )-> identifier[ContractConstructor] :
literal[string]
keyword[if] keyword[not] identifier[funder] keyword[or] identifier[funder] == identifier[NULL_ADDRESS] :
identifier[funder] = identifier[NULL_ADDRESS]
identifier[payment] = literal[int]
keyword[return] identifier[get_paying_proxy_contract] ( identifier[self] . identifier[w3] ). identifier[constructor] (
identifier[master_copy] ,
identifier[initializer] ,
identifier[funder] ,
identifier[payment_token] ,
identifier[payment] ) | def _build_proxy_contract_creation_constructor(self, master_copy: str, initializer: bytes, funder: str, payment_token: str, payment: int) -> ContractConstructor:
"""
:param master_copy: Master Copy of Gnosis Safe already deployed
:param initializer: Data initializer to send to GnosisSafe setup method
:param funder: Address that should get the payment (if payment set)
:param payment_token: address of the token used for payment; if not set (0x0), payment is made in ether
:param payment: Payment
:return: Transaction dictionary
"""
if not funder or funder == NULL_ADDRESS:
funder = NULL_ADDRESS
payment = 0 # depends on [control=['if'], data=[]]
return get_paying_proxy_contract(self.w3).constructor(master_copy, initializer, funder, payment_token, payment) |
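A deployment sketch; `safe_creator` stands in for the object this method lives on, the master-copy address is a placeholder, and `buildTransaction` is the pre-v6 web3.py spelling:

```python
constructor = safe_creator._build_proxy_contract_creation_constructor(
    master_copy='0x34CfAC646f301356fAa8B21e94227e3583Fe3F5F',  # placeholder address
    initializer=setup_data,       # encoded GnosisSafe.setup(...) call
    funder=NULL_ADDRESS,          # unset funder -> payment is zeroed out
    payment_token=NULL_ADDRESS,
    payment=123,                  # ignored: reset to 0 because funder is unset
)
tx = constructor.buildTransaction({'from': deployer_address})
```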
def endpoint_class(collection):
"""Return the :class:`sandman.model.Model` associated with the endpoint
*collection*.
:param string collection: a :class:`sandman.model.Model` endpoint
:rtype: :class:`sandman.model.Model`
"""
with app.app_context():
try:
cls = current_app.class_references[collection]
except KeyError:
raise InvalidAPIUsage(404)
return cls | def function[endpoint_class, parameter[collection]]:
constant[Return the :class:`sandman.model.Model` associated with the endpoint
*collection*.
:param string collection: a :class:`sandman.model.Model` endpoint
:rtype: :class:`sandman.model.Model`
]
with call[name[app].app_context, parameter[]] begin[:]
<ast.Try object at 0x7da18f58d300>
return[name[cls]] | keyword[def] identifier[endpoint_class] ( identifier[collection] ):
literal[string]
keyword[with] identifier[app] . identifier[app_context] ():
keyword[try] :
identifier[cls] = identifier[current_app] . identifier[class_references] [ identifier[collection] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidAPIUsage] ( literal[int] )
keyword[return] identifier[cls] | def endpoint_class(collection):
"""Return the :class:`sandman.model.Model` associated with the endpoint
*collection*.
:param string collection: a :class:`sandman.model.Model` endpoint
:rtype: :class:`sandman.model.Model`
"""
with app.app_context():
try:
cls = current_app.class_references[collection] # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidAPIUsage(404) # depends on [control=['except'], data=[]]
return cls # depends on [control=['with'], data=[]] |
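A usage sketch, assuming an 'artists' model has been registered with sandman:

```python
Artist = endpoint_class('artists')  # the Model registered for the /artists endpoint
endpoint_class('unknown')           # unregistered collection -> raises InvalidAPIUsage(404)
```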
def extent_to_array(extent, source_crs, dest_crs=None):
"""Convert the supplied extent to geographic and return as an array.
:param extent: Rectangle defining a spatial extent in any CRS.
:type extent: QgsRectangle
:param source_crs: Coordinate system used for input extent.
:type source_crs: QgsCoordinateReferenceSystem
:param dest_crs: Coordinate system used for output extent. Defaults to
EPSG:4326 if not specified.
:type dest_crs: QgsCoordinateReferenceSystem
:returns: a list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list
"""
if dest_crs is None:
geo_crs = QgsCoordinateReferenceSystem()
geo_crs.createFromSrid(4326)
else:
geo_crs = dest_crs
transform = QgsCoordinateTransform(source_crs, geo_crs,
QgsProject.instance())
# Get the clip area in the layer's crs
transformed_extent = transform.transformBoundingBox(extent)
geo_extent = [
transformed_extent.xMinimum(),
transformed_extent.yMinimum(),
transformed_extent.xMaximum(),
transformed_extent.yMaximum()]
return geo_extent | def function[extent_to_array, parameter[extent, source_crs, dest_crs]]:
constant[Convert the supplied extent to geographic and return as an array.
:param extent: Rectangle defining a spatial extent in any CRS.
:type extent: QgsRectangle
:param source_crs: Coordinate system used for input extent.
:type source_crs: QgsCoordinateReferenceSystem
:param dest_crs: Coordinate system used for output extent. Defaults to
EPSG:4326 if not specified.
:type dest_crs: QgsCoordinateReferenceSystem
:returns: a list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list
]
if compare[name[dest_crs] is constant[None]] begin[:]
variable[geo_crs] assign[=] call[name[QgsCoordinateReferenceSystem], parameter[]]
call[name[geo_crs].createFromSrid, parameter[constant[4326]]]
variable[transform] assign[=] call[name[QgsCoordinateTransform], parameter[name[source_crs], name[geo_crs], call[name[QgsProject].instance, parameter[]]]]
variable[transformed_extent] assign[=] call[name[transform].transformBoundingBox, parameter[name[extent]]]
variable[geo_extent] assign[=] list[[<ast.Call object at 0x7da18f00ec80>, <ast.Call object at 0x7da18f00c160>, <ast.Call object at 0x7da18f00c040>, <ast.Call object at 0x7da18f00f4f0>]]
return[name[geo_extent]] | keyword[def] identifier[extent_to_array] ( identifier[extent] , identifier[source_crs] , identifier[dest_crs] = keyword[None] ):
literal[string]
keyword[if] identifier[dest_crs] keyword[is] keyword[None] :
identifier[geo_crs] = identifier[QgsCoordinateReferenceSystem] ()
identifier[geo_crs] . identifier[createFromSrid] ( literal[int] )
keyword[else] :
identifier[geo_crs] = identifier[dest_crs]
identifier[transform] = identifier[QgsCoordinateTransform] ( identifier[source_crs] , identifier[geo_crs] ,
identifier[QgsProject] . identifier[instance] ())
identifier[transformed_extent] = identifier[transform] . identifier[transformBoundingBox] ( identifier[extent] )
identifier[geo_extent] =[
identifier[transformed_extent] . identifier[xMinimum] (),
identifier[transformed_extent] . identifier[yMinimum] (),
identifier[transformed_extent] . identifier[xMaximum] (),
identifier[transformed_extent] . identifier[yMaximum] ()]
keyword[return] identifier[geo_extent] | def extent_to_array(extent, source_crs, dest_crs=None):
"""Convert the supplied extent to geographic and return as an array.
:param extent: Rectangle defining a spatial extent in any CRS.
:type extent: QgsRectangle
:param source_crs: Coordinate system used for input extent.
:type source_crs: QgsCoordinateReferenceSystem
:param dest_crs: Coordinate system used for output extent. Defaults to
EPSG:4326 if not specified.
:type dest_crs: QgsCoordinateReferenceSystem
:returns: a list in the form [xmin, ymin, xmax, ymax] where all
coordinates provided are in Geographic / EPSG:4326.
:rtype: list
"""
if dest_crs is None:
geo_crs = QgsCoordinateReferenceSystem()
geo_crs.createFromSrid(4326) # depends on [control=['if'], data=[]]
else:
geo_crs = dest_crs
transform = QgsCoordinateTransform(source_crs, geo_crs, QgsProject.instance())
# Get the clip area in the layer's crs
transformed_extent = transform.transformBoundingBox(extent)
geo_extent = [transformed_extent.xMinimum(), transformed_extent.yMinimum(), transformed_extent.xMaximum(), transformed_extent.yMaximum()]
return geo_extent |
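A sketch converting a Web-Mercator extent to geographic coordinates (output values approximate):

```python
from qgis.core import QgsCoordinateReferenceSystem, QgsRectangle

source_crs = QgsCoordinateReferenceSystem('EPSG:3857')  # Web Mercator
extent = QgsRectangle(-20037508.34, -20048966.1, 20037508.34, 20048966.1)
bbox = extent_to_array(extent, source_crs)  # dest defaults to EPSG:4326
# -> approximately [-180.0, -85.06, 180.0, 85.06]
```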
def args_str(self):
"""
Return an args string for the repr.
"""
matched = [str(m) for m in self._matchers[:self._position]]
unmatched = [str(m) for m in self._matchers[self._position:]]
return 'matched=[{}], unmatched=[{}]'.format(
', '.join(matched), ', '.join(unmatched)) | def function[args_str, parameter[self]]:
constant[
Return an args string for the repr.
]
variable[matched] assign[=] <ast.ListComp object at 0x7da1b242ad70>
variable[unmatched] assign[=] <ast.ListComp object at 0x7da1b2428670>
return[call[constant[matched=[{}], unmatched=[{}]].format, parameter[call[constant[, ].join, parameter[name[matched]]], call[constant[, ].join, parameter[name[unmatched]]]]]] | keyword[def] identifier[args_str] ( identifier[self] ):
literal[string]
identifier[matched] =[ identifier[str] ( identifier[m] ) keyword[for] identifier[m] keyword[in] identifier[self] . identifier[_matchers] [: identifier[self] . identifier[_position] ]]
identifier[unmatched] =[ identifier[str] ( identifier[m] ) keyword[for] identifier[m] keyword[in] identifier[self] . identifier[_matchers] [ identifier[self] . identifier[_position] :]]
keyword[return] literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[matched] ), literal[string] . identifier[join] ( identifier[unmatched] )) | def args_str(self):
"""
Return an args string for the repr.
"""
matched = [str(m) for m in self._matchers[:self._position]]
unmatched = [str(m) for m in self._matchers[self._position:]]
return 'matched=[{}], unmatched=[{}]'.format(', '.join(matched), ', '.join(unmatched)) |