code (string, 75–104k chars) | code_sememe (string, 47–309k chars) | token_type (string, 215–214k chars) | code_dependency (string, 75–155k chars)
---|---|---|---
def get_export_launch_description_form(self):
"""Returns a form for editing the virtual system description.
Since the data for the form are fetched from the cloud, a
progress object is also returned to indicate if/when the form
is ready to be used.
out form of type :class:`IVirtualSystemDescriptionForm`
An IForm instance for editing the virtual system description.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
"""
(progress, form) = self._call("getExportLaunchDescriptionForm")
progress = IProgress(progress)
form = IVirtualSystemDescriptionForm(form)
return (progress, form) | def function[get_export_launch_description_form, parameter[self]]:
constant[Returns a form for editing the virtual system description.
Since the data for the form are fetched from the cloud, a
progress object is also returned to indicate if/when the form
is ready to be used.
out form of type :class:`IVirtualSystemDescriptionForm`
An IForm instance for editing the virtual system description.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
]
<ast.Tuple object at 0x7da204346f50> assign[=] call[name[self]._call, parameter[constant[getExportLaunchDescriptionForm]]]
variable[progress] assign[=] call[name[IProgress], parameter[name[progress]]]
variable[form] assign[=] call[name[IVirtualSystemDescriptionForm], parameter[name[form]]]
return[tuple[[<ast.Name object at 0x7da204344f70>, <ast.Name object at 0x7da204347430>]]] | keyword[def] identifier[get_export_launch_description_form] ( identifier[self] ):
literal[string]
( identifier[progress] , identifier[form] )= identifier[self] . identifier[_call] ( literal[string] )
identifier[progress] = identifier[IProgress] ( identifier[progress] )
identifier[form] = identifier[IVirtualSystemDescriptionForm] ( identifier[form] )
keyword[return] ( identifier[progress] , identifier[form] ) | def get_export_launch_description_form(self):
"""Returns a form for editing the virtual system description.
Since the data for the form are fetched from the cloud, a
progress object is also returned to indicate if/when the form
is ready to be used.
out form of type :class:`IVirtualSystemDescriptionForm`
An IForm instance for editing the virtual system description.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
"""
(progress, form) = self._call('getExportLaunchDescriptionForm')
progress = IProgress(progress)
form = IVirtualSystemDescriptionForm(form)
return (progress, form) |
def dispatch(self, producer=None):
"""
Dispatch the event, sending a message to the queue using a producer.
:param producer: optional `Producer` to replace the default one.
"""
log.info('@Event.dispatch `{}` with subject `{}`'
.format(self.name, self.subject))
producer = (producer or Registry.get_producer())
if not producer:
raise MissingProducerError('You have not registered a Producer')
try:
producer.produce(self.topic, self.name, self.subject, self.data)
except:
fallback = Registry.get_fallback()
fallback(self)
raise | def function[dispatch, parameter[self, producer]]:
constant[
Dispatch the event, sending a message to the queue using a producer.
:param producer: optional `Producer` to replace the default one.
]
call[name[log].info, parameter[call[constant[@Event.dispatch `{}` with subject `{}`].format, parameter[name[self].name, name[self].subject]]]]
variable[producer] assign[=] <ast.BoolOp object at 0x7da18f721d50>
if <ast.UnaryOp object at 0x7da18f7232b0> begin[:]
<ast.Raise object at 0x7da18f721e10>
<ast.Try object at 0x7da18f720eb0> | keyword[def] identifier[dispatch] ( identifier[self] , identifier[producer] = keyword[None] ):
literal[string]
identifier[log] . identifier[info] ( literal[string]
. identifier[format] ( identifier[self] . identifier[name] , identifier[self] . identifier[subject] ))
identifier[producer] =( identifier[producer] keyword[or] identifier[Registry] . identifier[get_producer] ())
keyword[if] keyword[not] identifier[producer] :
keyword[raise] identifier[MissingProducerError] ( literal[string] )
keyword[try] :
identifier[producer] . identifier[produce] ( identifier[self] . identifier[topic] , identifier[self] . identifier[name] , identifier[self] . identifier[subject] , identifier[self] . identifier[data] )
keyword[except] :
identifier[fallback] = identifier[Registry] . identifier[get_fallback] ()
identifier[fallback] ( identifier[self] )
keyword[raise] | def dispatch(self, producer=None):
"""
Dispatch the event, sending a message to the queue using a producer.
:param producer: optional `Producer` to replace the default one.
"""
log.info('@Event.dispatch `{}` with subject `{}`'.format(self.name, self.subject))
producer = producer or Registry.get_producer()
if not producer:
raise MissingProducerError('You have not registered a Producer') # depends on [control=['if'], data=[]]
try:
producer.produce(self.topic, self.name, self.subject, self.data) # depends on [control=['try'], data=[]]
except:
fallback = Registry.get_fallback()
fallback(self)
raise # depends on [control=['except'], data=[]] |
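The dispatch flow above (resolve a producer, produce, run the registered fallback and re-raise on failure) can be exercised with stand-ins. A minimal runnable sketch; `Registry`, `QueueProducer`, and the event fields below are stubs invented for illustration, not the library's real classes:

```python
import logging

log = logging.getLogger(__name__)

class MissingProducerError(Exception):
    """Stub of the error raised when no producer is registered."""

class QueueProducer:
    def produce(self, topic, name, subject, data):
        print("produced", topic, name, subject, data)

class Registry:
    _producer = QueueProducer()

    @classmethod
    def get_producer(cls):
        return cls._producer

    @classmethod
    def get_fallback(cls):
        # The fallback runs before the original exception is re-raised.
        return lambda event: log.error("dispatch failed for %s", event.name)

class Event:
    def __init__(self, topic, name, subject, data):
        self.topic, self.name, self.subject, self.data = topic, name, subject, data

    def dispatch(self, producer=None):
        producer = producer or Registry.get_producer()
        if not producer:
            raise MissingProducerError("You have not registered a Producer")
        try:
            producer.produce(self.topic, self.name, self.subject, self.data)
        except Exception:
            Registry.get_fallback()(self)
            raise

Event("orders", "order_created", "order-42", {"total": 9.99}).dispatch()
```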
def onecmd_plus_hooks(self, line: str, pyscript_bridge_call: bool = False) -> bool:
"""Top-level function called by cmdloop() to handle parsing a line and running the command and all of its hooks.
:param line: line of text read from input
:param pyscript_bridge_call: This should only ever be set to True by PyscriptBridge to signify the beginning
of an app() call in a pyscript. It is used to enable/disable the storage of the
command's stdout.
:return: True if cmdloop() should exit, False otherwise
"""
import datetime
stop = False
try:
statement = self._complete_statement(line)
except EmptyStatement:
return self._run_cmdfinalization_hooks(stop, None)
except ValueError as ex:
# If shlex.split failed on syntax, let the user know what's going on
self.perror("Invalid syntax: {}".format(ex), traceback_war=False)
return stop
# now that we have a statement, run it with all the hooks
try:
# call the postparsing hooks
data = plugin.PostparsingData(False, statement)
for func in self._postparsing_hooks:
data = func(data)
if data.stop:
break
# unpack the data object
statement = data.statement
stop = data.stop
if stop:
# we should not run the command, but
# we need to run the finalization hooks
raise EmptyStatement
# Keep track of whether or not we were already redirecting before this command
already_redirecting = self.redirecting
# This will be a utils.RedirectionSavedState object for the command
saved_state = None
try:
# Get sigint protection while we set up redirection
with self.sigint_protection:
if pyscript_bridge_call:
# Start saving command's stdout at this point
self.stdout.pause_storage = False
redir_error, saved_state = self._redirect_output(statement)
self.cur_pipe_proc_reader = saved_state.pipe_proc_reader
# Do not continue if an error occurred while trying to redirect
if not redir_error:
# See if we need to update self.redirecting
if not already_redirecting:
self.redirecting = saved_state.redirecting
timestart = datetime.datetime.now()
# precommand hooks
data = plugin.PrecommandData(statement)
for func in self._precmd_hooks:
data = func(data)
statement = data.statement
# call precmd() for compatibility with cmd.Cmd
statement = self.precmd(statement)
# go run the command function
stop = self.onecmd(statement)
# postcommand hooks
data = plugin.PostcommandData(stop, statement)
for func in self._postcmd_hooks:
data = func(data)
# retrieve the final value of stop, ignoring any statement modification from the hooks
stop = data.stop
# call postcmd() for compatibility with cmd.Cmd
stop = self.postcmd(stop, statement)
if self.timing:
self.pfeedback('Elapsed: {}'.format(datetime.datetime.now() - timestart))
finally:
# Get sigint protection while we restore stuff
with self.sigint_protection:
if saved_state is not None:
self._restore_output(statement, saved_state)
if not already_redirecting:
self.redirecting = False
if pyscript_bridge_call:
# Stop saving command's stdout before command finalization hooks run
self.stdout.pause_storage = True
except EmptyStatement:
# don't do anything, but do allow command finalization hooks to run
pass
except Exception as ex:
self.perror(ex)
finally:
return self._run_cmdfinalization_hooks(stop, statement) | def function[onecmd_plus_hooks, parameter[self, line, pyscript_bridge_call]]:
constant[Top-level function called by cmdloop() to handle parsing a line and running the command and all of its hooks.
:param line: line of text read from input
:param pyscript_bridge_call: This should only ever be set to True by PyscriptBridge to signify the beginning
of an app() call in a pyscript. It is used to enable/disable the storage of the
command's stdout.
:return: True if cmdloop() should exit, False otherwise
]
import module[datetime]
variable[stop] assign[=] constant[False]
<ast.Try object at 0x7da207f99e10>
<ast.Try object at 0x7da20c6c6320> | keyword[def] identifier[onecmd_plus_hooks] ( identifier[self] , identifier[line] : identifier[str] , identifier[pyscript_bridge_call] : identifier[bool] = keyword[False] )-> identifier[bool] :
literal[string]
keyword[import] identifier[datetime]
identifier[stop] = keyword[False]
keyword[try] :
identifier[statement] = identifier[self] . identifier[_complete_statement] ( identifier[line] )
keyword[except] identifier[EmptyStatement] :
keyword[return] identifier[self] . identifier[_run_cmdfinalization_hooks] ( identifier[stop] , keyword[None] )
keyword[except] identifier[ValueError] keyword[as] identifier[ex] :
identifier[self] . identifier[perror] ( literal[string] . identifier[format] ( identifier[ex] ), identifier[traceback_war] = keyword[False] )
keyword[return] identifier[stop]
keyword[try] :
identifier[data] = identifier[plugin] . identifier[PostparsingData] ( keyword[False] , identifier[statement] )
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[_postparsing_hooks] :
identifier[data] = identifier[func] ( identifier[data] )
keyword[if] identifier[data] . identifier[stop] :
keyword[break]
identifier[statement] = identifier[data] . identifier[statement]
identifier[stop] = identifier[data] . identifier[stop]
keyword[if] identifier[stop] :
keyword[raise] identifier[EmptyStatement]
identifier[already_redirecting] = identifier[self] . identifier[redirecting]
identifier[saved_state] = keyword[None]
keyword[try] :
keyword[with] identifier[self] . identifier[sigint_protection] :
keyword[if] identifier[pyscript_bridge_call] :
identifier[self] . identifier[stdout] . identifier[pause_storage] = keyword[False]
identifier[redir_error] , identifier[saved_state] = identifier[self] . identifier[_redirect_output] ( identifier[statement] )
identifier[self] . identifier[cur_pipe_proc_reader] = identifier[saved_state] . identifier[pipe_proc_reader]
keyword[if] keyword[not] identifier[redir_error] :
keyword[if] keyword[not] identifier[already_redirecting] :
identifier[self] . identifier[redirecting] = identifier[saved_state] . identifier[redirecting]
identifier[timestart] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[data] = identifier[plugin] . identifier[PrecommandData] ( identifier[statement] )
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[_precmd_hooks] :
identifier[data] = identifier[func] ( identifier[data] )
identifier[statement] = identifier[data] . identifier[statement]
identifier[statement] = identifier[self] . identifier[precmd] ( identifier[statement] )
identifier[stop] = identifier[self] . identifier[onecmd] ( identifier[statement] )
identifier[data] = identifier[plugin] . identifier[PostcommandData] ( identifier[stop] , identifier[statement] )
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[_postcmd_hooks] :
identifier[data] = identifier[func] ( identifier[data] )
identifier[stop] = identifier[data] . identifier[stop]
identifier[stop] = identifier[self] . identifier[postcmd] ( identifier[stop] , identifier[statement] )
keyword[if] identifier[self] . identifier[timing] :
identifier[self] . identifier[pfeedback] ( literal[string] . identifier[format] ( identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[timestart] ))
keyword[finally] :
keyword[with] identifier[self] . identifier[sigint_protection] :
keyword[if] identifier[saved_state] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_restore_output] ( identifier[statement] , identifier[saved_state] )
keyword[if] keyword[not] identifier[already_redirecting] :
identifier[self] . identifier[redirecting] = keyword[False]
keyword[if] identifier[pyscript_bridge_call] :
identifier[self] . identifier[stdout] . identifier[pause_storage] = keyword[True]
keyword[except] identifier[EmptyStatement] :
keyword[pass]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[self] . identifier[perror] ( identifier[ex] )
keyword[finally] :
keyword[return] identifier[self] . identifier[_run_cmdfinalization_hooks] ( identifier[stop] , identifier[statement] ) | def onecmd_plus_hooks(self, line: str, pyscript_bridge_call: bool=False) -> bool:
"""Top-level function called by cmdloop() to handle parsing a line and running the command and all of its hooks.
:param line: line of text read from input
:param pyscript_bridge_call: This should only ever be set to True by PyscriptBridge to signify the beginning
of an app() call in a pyscript. It is used to enable/disable the storage of the
command's stdout.
:return: True if cmdloop() should exit, False otherwise
"""
import datetime
stop = False
try:
statement = self._complete_statement(line) # depends on [control=['try'], data=[]]
except EmptyStatement:
return self._run_cmdfinalization_hooks(stop, None) # depends on [control=['except'], data=[]]
except ValueError as ex:
# If shlex.split failed on syntax, let the user know what's going on
self.perror('Invalid syntax: {}'.format(ex), traceback_war=False)
return stop # depends on [control=['except'], data=['ex']]
# now that we have a statement, run it with all the hooks
try:
# call the postparsing hooks
data = plugin.PostparsingData(False, statement)
for func in self._postparsing_hooks:
data = func(data)
if data.stop:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['func']]
# unpack the data object
statement = data.statement
stop = data.stop
if stop:
# we should not run the command, but
# we need to run the finalization hooks
raise EmptyStatement # depends on [control=['if'], data=[]]
# Keep track of whether or not we were already redirecting before this command
already_redirecting = self.redirecting
# This will be a utils.RedirectionSavedState object for the command
saved_state = None
try:
# Get sigint protection while we set up redirection
with self.sigint_protection:
if pyscript_bridge_call:
# Start saving command's stdout at this point
self.stdout.pause_storage = False # depends on [control=['if'], data=[]]
(redir_error, saved_state) = self._redirect_output(statement)
self.cur_pipe_proc_reader = saved_state.pipe_proc_reader # depends on [control=['with'], data=[]]
# Do not continue if an error occurred while trying to redirect
if not redir_error:
# See if we need to update self.redirecting
if not already_redirecting:
self.redirecting = saved_state.redirecting # depends on [control=['if'], data=[]]
timestart = datetime.datetime.now()
# precommand hooks
data = plugin.PrecommandData(statement)
for func in self._precmd_hooks:
data = func(data) # depends on [control=['for'], data=['func']]
statement = data.statement
# call precmd() for compatibility with cmd.Cmd
statement = self.precmd(statement)
# go run the command function
stop = self.onecmd(statement)
# postcommand hooks
data = plugin.PostcommandData(stop, statement)
for func in self._postcmd_hooks:
data = func(data) # depends on [control=['for'], data=['func']]
# retrieve the final value of stop, ignoring any statement modification from the hooks
stop = data.stop
# call postcmd() for compatibility with cmd.Cmd
stop = self.postcmd(stop, statement)
if self.timing:
self.pfeedback('Elapsed: {}'.format(datetime.datetime.now() - timestart)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
# Get sigint protection while we restore stuff
with self.sigint_protection:
if saved_state is not None:
self._restore_output(statement, saved_state) # depends on [control=['if'], data=['saved_state']]
if not already_redirecting:
self.redirecting = False # depends on [control=['if'], data=[]]
if pyscript_bridge_call:
# Stop saving command's stdout before command finalization hooks run
self.stdout.pause_storage = True # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except EmptyStatement:
# don't do anything, but do allow command finalization hooks to run
pass # depends on [control=['except'], data=[]]
except Exception as ex:
self.perror(ex) # depends on [control=['except'], data=['ex']]
finally:
return self._run_cmdfinalization_hooks(stop, statement) |
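For context, `onecmd_plus_hooks` is the per-line driver that `cmdloop()` calls, and it can also be invoked directly. A minimal sketch, assuming the `cmd2` package is installed (hook-registration APIs vary across cmd2 versions, so only the core call is shown):

```python
import cmd2

class App(cmd2.Cmd):
    def do_greet(self, args):
        """Trivial command used to exercise the hook pipeline."""
        self.poutput("hello")

app = App()
# Runs parsing, the postparsing/precmd/postcmd hooks, redirection setup
# and teardown, then command finalization; a truthy result means "stop".
stop = app.onecmd_plus_hooks("greet")
assert not stop
```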
def _loadFromHStruct(self, dtype: HdlType, bitAddr: int):
"""
Parse HStruct type into this transaction template instance
:return: address of its end
"""
for f in dtype.fields:
t = f.dtype
origin = f
isPadding = f.name is None
if isPadding:
width = t.bit_length()
bitAddr += width
else:
fi = TransTmpl(t, bitAddr, parent=self, origin=origin)
self.children.append(fi)
bitAddr = fi.bitAddrEnd
return bitAddr | def function[_loadFromHStruct, parameter[self, dtype, bitAddr]]:
constant[
Parse HStruct type into this transaction template instance
:return: address of its end
]
for taget[name[f]] in starred[name[dtype].fields] begin[:]
variable[t] assign[=] name[f].dtype
variable[origin] assign[=] name[f]
variable[isPadding] assign[=] compare[name[f].name is constant[None]]
if name[isPadding] begin[:]
variable[width] assign[=] call[name[t].bit_length, parameter[]]
<ast.AugAssign object at 0x7da1b0388430>
return[name[bitAddr]] | keyword[def] identifier[_loadFromHStruct] ( identifier[self] , identifier[dtype] : identifier[HdlType] , identifier[bitAddr] : identifier[int] ):
literal[string]
keyword[for] identifier[f] keyword[in] identifier[dtype] . identifier[fields] :
identifier[t] = identifier[f] . identifier[dtype]
identifier[origin] = identifier[f]
identifier[isPadding] = identifier[f] . identifier[name] keyword[is] keyword[None]
keyword[if] identifier[isPadding] :
identifier[width] = identifier[t] . identifier[bit_length] ()
identifier[bitAddr] += identifier[width]
keyword[else] :
identifier[fi] = identifier[TransTmpl] ( identifier[t] , identifier[bitAddr] , identifier[parent] = identifier[self] , identifier[origin] = identifier[origin] )
identifier[self] . identifier[children] . identifier[append] ( identifier[fi] )
identifier[bitAddr] = identifier[fi] . identifier[bitAddrEnd]
keyword[return] identifier[bitAddr] | def _loadFromHStruct(self, dtype: HdlType, bitAddr: int):
"""
Parse HStruct type into this transaction template instance
:return: address of its end
"""
for f in dtype.fields:
t = f.dtype
origin = f
isPadding = f.name is None
if isPadding:
width = t.bit_length()
bitAddr += width # depends on [control=['if'], data=[]]
else:
fi = TransTmpl(t, bitAddr, parent=self, origin=origin)
self.children.append(fi)
bitAddr = fi.bitAddrEnd # depends on [control=['for'], data=['f']]
return bitAddr |
def where(self, column, operator=Null(), value=None, boolean="and"):
"""
Add a where clause to the query
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:param boolean: The boolean of the where clause
:type boolean: str
:return: The current Builder instance
:rtype: Builder
"""
if isinstance(column, Builder):
self._query.add_nested_where_query(column.get_query(), boolean)
else:
self._query.where(column, operator, value, boolean)
return self | def function[where, parameter[self, column, operator, value, boolean]]:
constant[
Add a where clause to the query
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:param boolean: The boolean of the where clause
:type boolean: str
:return: The current Builder instance
:rtype: Builder
]
if call[name[isinstance], parameter[name[column], name[Builder]]] begin[:]
call[name[self]._query.add_nested_where_query, parameter[call[name[column].get_query, parameter[]], name[boolean]]]
return[name[self]] | keyword[def] identifier[where] ( identifier[self] , identifier[column] , identifier[operator] = identifier[Null] (), identifier[value] = keyword[None] , identifier[boolean] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[column] , identifier[Builder] ):
identifier[self] . identifier[_query] . identifier[add_nested_where_query] ( identifier[column] . identifier[get_query] (), identifier[boolean] )
keyword[else] :
identifier[self] . identifier[_query] . identifier[where] ( identifier[column] , identifier[operator] , identifier[value] , identifier[boolean] )
keyword[return] identifier[self] | def where(self, column, operator=Null(), value=None, boolean='and'):
"""
Add a where clause to the query
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:param boolean: The boolean of the where clause
:type boolean: str
:return: The current Builder instance
:rtype: Builder
"""
if isinstance(column, Builder):
self._query.add_nested_where_query(column.get_query(), boolean) # depends on [control=['if'], data=[]]
else:
self._query.where(column, operator, value, boolean)
return self |
def _FormatSocketInet32Token(self, token_data):
"""Formats an Internet socket token as a dictionary of values.
Args:
token_data (bsm_token_data_sockinet32): AUT_SOCKINET32 token data.
Returns:
dict[str, str]: token values.
"""
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
ip_address = self._FormatPackedIPv4Address(token_data.ip_addresss)
return {
'protocols': protocol,
'family': token_data.socket_family,
'port': token_data.port_number,
'address': ip_address} | def function[_FormatSocketInet32Token, parameter[self, token_data]]:
constant[Formats an Internet socket token as a dictionary of values.
Args:
token_data (bsm_token_data_sockinet32): AUT_SOCKINET32 token data.
Returns:
dict[str, str]: token values.
]
variable[protocol] assign[=] call[name[bsmtoken].BSM_PROTOCOLS.get, parameter[name[token_data].socket_family, constant[UNKNOWN]]]
variable[ip_address] assign[=] call[name[self]._FormatPackedIPv4Address, parameter[name[token_data].ip_addresss]]
return[dictionary[[<ast.Constant object at 0x7da20e956470>, <ast.Constant object at 0x7da20e957a30>, <ast.Constant object at 0x7da20e957d60>, <ast.Constant object at 0x7da20e957a00>], [<ast.Name object at 0x7da20e9547f0>, <ast.Attribute object at 0x7da20e957430>, <ast.Attribute object at 0x7da20e954b50>, <ast.Name object at 0x7da20e9573a0>]]] | keyword[def] identifier[_FormatSocketInet32Token] ( identifier[self] , identifier[token_data] ):
literal[string]
identifier[protocol] = identifier[bsmtoken] . identifier[BSM_PROTOCOLS] . identifier[get] ( identifier[token_data] . identifier[socket_family] , literal[string] )
identifier[ip_address] = identifier[self] . identifier[_FormatPackedIPv4Address] ( identifier[token_data] . identifier[ip_addresss] )
keyword[return] {
literal[string] : identifier[protocol] ,
literal[string] : identifier[token_data] . identifier[socket_family] ,
literal[string] : identifier[token_data] . identifier[port_number] ,
literal[string] : identifier[ip_address] } | def _FormatSocketInet32Token(self, token_data):
"""Formats an Internet socket token as a dictionary of values.
Args:
token_data (bsm_token_data_sockinet32): AUT_SOCKINET32 token data.
Returns:
dict[str, str]: token values.
"""
protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.socket_family, 'UNKNOWN')
ip_address = self._FormatPackedIPv4Address(token_data.ip_addresss)
return {'protocols': protocol, 'family': token_data.socket_family, 'port': token_data.port_number, 'address': ip_address} |
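The helper `_FormatPackedIPv4Address` is not shown in this row; a plausible stand-alone equivalent, assuming the address arrives as a 32-bit integer packed most-significant octet first (an assumption, since the real plaso helper may differ):

```python
def format_packed_ipv4(packed):
    # Split a 32-bit integer into four octets, high byte first.
    return '.'.join(str((packed >> shift) & 0xFF) for shift in (24, 16, 8, 0))

assert format_packed_ipv4(0x7F000001) == '127.0.0.1'
```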
def adjoint(self):
"""Adjoint of this operator, a `SamplingOperator`.
The ``'char_fun'`` variant of this operator corresponds to the
``'integrate'`` sampling operator, and ``'dirac'`` corresponds to
``'point_eval'``.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> # Point (0, 0) occurs twice
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='dirac')
>>> y = op.range.element([[1, 2, 3],
... [4, 5, 6]])
>>> op.adjoint(y)
rn(4).element([ 1., 5., 6., 1.])
>>> x = op.domain.element([1, 2, 3, 4])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='char_fun')
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
"""
if self.variant == 'dirac':
variant = 'point_eval'
elif self.variant == 'char_fun':
variant = 'integrate'
else:
raise RuntimeError('The variant "{!r}" is not yet supported'
''.format(self.variant))
return SamplingOperator(self.range, self.sampling_points, variant) | def function[adjoint, parameter[self]]:
constant[Adjoint of this operator, a `SamplingOperator`.
The ``'char_fun'`` variant of this operator corresponds to the
``'integrate'`` sampling operator, and ``'dirac'`` corresponds to
``'point_eval'``.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> # Point (0, 0) occurs twice
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='dirac')
>>> y = op.range.element([[1, 2, 3],
... [4, 5, 6]])
>>> op.adjoint(y)
rn(4).element([ 1., 5., 6., 1.])
>>> x = op.domain.element([1, 2, 3, 4])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='char_fun')
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
]
if compare[name[self].variant equal[==] constant[dirac]] begin[:]
variable[variant] assign[=] constant[point_eval]
return[call[name[SamplingOperator], parameter[name[self].range, name[self].sampling_points, name[variant]]]] | keyword[def] identifier[adjoint] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[variant] == literal[string] :
identifier[variant] = literal[string]
keyword[elif] identifier[self] . identifier[variant] == literal[string] :
identifier[variant] = literal[string]
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[variant] ))
keyword[return] identifier[SamplingOperator] ( identifier[self] . identifier[range] , identifier[self] . identifier[sampling_points] , identifier[variant] ) | def adjoint(self):
"""Adjoint of this operator, a `SamplingOperator`.
The ``'char_fun'`` variant of this operator corresponds to the
``'integrate'`` sampling operator, and ``'dirac'`` corresponds to
``'point_eval'``.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> # Point (0, 0) occurs twice
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='dirac')
>>> y = op.range.element([[1, 2, 3],
... [4, 5, 6]])
>>> op.adjoint(y)
rn(4).element([ 1., 5., 6., 1.])
>>> x = op.domain.element([1, 2, 3, 4])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='char_fun')
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
"""
if self.variant == 'dirac':
variant = 'point_eval' # depends on [control=['if'], data=[]]
elif self.variant == 'char_fun':
variant = 'integrate' # depends on [control=['if'], data=[]]
else:
raise RuntimeError('The variant "{!r}" is not yet supported'.format(self.variant))
return SamplingOperator(self.range, self.sampling_points, variant) |
def _generic_action_parser(self):
"""Generic parser for Actions."""
actions = []
while True:
action_code = unpack_ui8(self._src)
if action_code == 0:
break
action_name = ACTION_NAMES[action_code]
if action_code > 128:
# have a payload!
action_len = unpack_ui16(self._src)
try:
action_meth = getattr(
self, "_handle_" + action_name.lower())
except AttributeError:
if self.unknown_alert:
raise ValueError(
"Unknown action: " + repr(action_name))
action_payload = self._src.read(action_len)
_dict = {'__str__': _repr, '__repr__': _repr,
'name': action_name}
action = type("UnknownAction", (SWFObject,), _dict)()
action.raw_payload = action_payload
actions.append(action)
else:
prev_pos = self._src.tell()
for action in action_meth(action_len):
assert action is not None, action_name
actions.append(action)
quant_read = self._src.tell() - prev_pos
if quant_read != action_len:
raise RuntimeError(
"Bad bytes consumption by action {!r} handler "
"(did {}, should {})".format(
action_name, quant_read, action_len))
else:
action = _make_object(action_name)
actions.append(action)
return actions | def function[_generic_action_parser, parameter[self]]:
constant[Generic parser for Actions.]
variable[actions] assign[=] list[[]]
while constant[True] begin[:]
variable[action_code] assign[=] call[name[unpack_ui8], parameter[name[self]._src]]
if compare[name[action_code] equal[==] constant[0]] begin[:]
break
variable[action_name] assign[=] call[name[ACTION_NAMES]][name[action_code]]
if compare[name[action_code] greater[>] constant[128]] begin[:]
variable[action_len] assign[=] call[name[unpack_ui16], parameter[name[self]._src]]
<ast.Try object at 0x7da18bccb580>
return[name[actions]] | keyword[def] identifier[_generic_action_parser] ( identifier[self] ):
literal[string]
identifier[actions] =[]
keyword[while] keyword[True] :
identifier[action_code] = identifier[unpack_ui8] ( identifier[self] . identifier[_src] )
keyword[if] identifier[action_code] == literal[int] :
keyword[break]
identifier[action_name] = identifier[ACTION_NAMES] [ identifier[action_code] ]
keyword[if] identifier[action_code] > literal[int] :
identifier[action_len] = identifier[unpack_ui16] ( identifier[self] . identifier[_src] )
keyword[try] :
identifier[action_meth] = identifier[getattr] (
identifier[self] , literal[string] + identifier[action_name] . identifier[lower] ())
keyword[except] identifier[AttributeError] :
keyword[if] identifier[self] . identifier[unknown_alert] :
keyword[raise] identifier[ValueError] (
literal[string] + identifier[repr] ( identifier[action_name] ))
identifier[action_payload] = identifier[self] . identifier[_src] . identifier[read] ( identifier[action_len] )
identifier[_dict] ={ literal[string] : identifier[_repr] , literal[string] : identifier[_repr] ,
literal[string] : identifier[action_name] }
identifier[action] = identifier[type] ( literal[string] ,( identifier[SWFObject] ,), identifier[_dict] )()
identifier[action] . identifier[raw_payload] = identifier[action_payload]
identifier[actions] . identifier[append] ( identifier[action] )
keyword[else] :
identifier[prev_pos] = identifier[self] . identifier[_src] . identifier[tell] ()
keyword[for] identifier[action] keyword[in] identifier[action_meth] ( identifier[action_len] ):
keyword[assert] identifier[action] keyword[is] keyword[not] keyword[None] , identifier[action_name]
identifier[actions] . identifier[append] ( identifier[action] )
identifier[quant_read] = identifier[self] . identifier[_src] . identifier[tell] ()- identifier[prev_pos]
keyword[if] identifier[quant_read] != identifier[action_len] :
keyword[raise] identifier[RuntimeError] (
literal[string]
literal[string] . identifier[format] (
identifier[action_name] , identifier[quant_read] , identifier[action_len] ))
keyword[else] :
identifier[action] = identifier[_make_object] ( identifier[action_name] )
identifier[actions] . identifier[append] ( identifier[action] )
keyword[return] identifier[actions] | def _generic_action_parser(self):
"""Generic parser for Actions."""
actions = []
while True:
action_code = unpack_ui8(self._src)
if action_code == 0:
break # depends on [control=['if'], data=[]]
action_name = ACTION_NAMES[action_code]
if action_code > 128:
# have a payload!
action_len = unpack_ui16(self._src)
try:
action_meth = getattr(self, '_handle_' + action_name.lower()) # depends on [control=['try'], data=[]]
except AttributeError:
if self.unknown_alert:
raise ValueError('Unknown action: ' + repr(action_name)) # depends on [control=['if'], data=[]]
action_payload = self._src.read(action_len)
_dict = {'__str__': _repr, '__repr__': _repr, 'name': action_name}
action = type('UnknownAction', (SWFObject,), _dict)()
action.raw_payload = action_payload
actions.append(action) # depends on [control=['except'], data=[]]
else:
prev_pos = self._src.tell()
for action in action_meth(action_len):
assert action is not None, action_name
actions.append(action) # depends on [control=['for'], data=['action']]
quant_read = self._src.tell() - prev_pos
if quant_read != action_len:
raise RuntimeError('Bad bytes consumption by action {!r} handler (did {}, should {})'.format(action_name, quant_read, action_len)) # depends on [control=['if'], data=['quant_read', 'action_len']] # depends on [control=['if'], data=[]]
else:
action = _make_object(action_name)
actions.append(action) # depends on [control=['while'], data=[]]
return actions |
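The parser leans on `unpack_ui8`/`unpack_ui16` helpers defined elsewhere; a plausible sketch, assuming SWF's little-endian byte order and a file-like source (the names match the usage above, the bodies are guesses):

```python
import io
import struct

def unpack_ui8(src):
    # One unsigned byte from a file-like object.
    return struct.unpack('<B', src.read(1))[0]

def unpack_ui16(src):
    # One little-endian unsigned 16-bit integer.
    return struct.unpack('<H', src.read(2))[0]

src = io.BytesIO(b'\x07\x2a\x00')
assert unpack_ui8(src) == 0x07   # action code below 128: no payload
assert unpack_ui16(src) == 42
```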
def begin(self, sql=None):
"""Begin a transaction."""
self._transaction = True
try:
begin = self._con.begin
except AttributeError:
return self._con.query(sql or 'begin')
else:
# use existing method if available
if sql:
return begin(sql=sql)
else:
return begin() | def function[begin, parameter[self, sql]]:
constant[Begin a transaction.]
name[self]._transaction assign[=] constant[True]
<ast.Try object at 0x7da20c6e77c0> | keyword[def] identifier[begin] ( identifier[self] , identifier[sql] = keyword[None] ):
literal[string]
identifier[self] . identifier[_transaction] = keyword[True]
keyword[try] :
identifier[begin] = identifier[self] . identifier[_con] . identifier[begin]
keyword[except] identifier[AttributeError] :
keyword[return] identifier[self] . identifier[_con] . identifier[query] ( identifier[sql] keyword[or] literal[string] )
keyword[else] :
keyword[if] identifier[sql] :
keyword[return] identifier[begin] ( identifier[sql] = identifier[sql] )
keyword[else] :
keyword[return] identifier[begin] () | def begin(self, sql=None):
"""Begin a transaction."""
self._transaction = True
try:
begin = self._con.begin # depends on [control=['try'], data=[]]
except AttributeError:
return self._con.query(sql or 'begin') # depends on [control=['except'], data=[]]
else:
# use existing method if available
if sql:
return begin(sql=sql) # depends on [control=['if'], data=[]]
else:
return begin() |
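The pattern here is: prefer the underlying connection's native `begin()` and emulate with raw SQL only when it is missing. A self-contained illustration with a stub connection (not DBUtils' real classes):

```python
class StubConnection:
    """Stub connection without a native begin(); only raw queries."""
    def query(self, sql):
        return "ran: " + sql

con = StubConnection()
try:
    begin = con.begin            # prefer the native method if present
except AttributeError:
    result = con.query("begin")  # emulate the transaction start with SQL
else:
    result = begin()
assert result == "ran: begin"
```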
def draw_progress_bar(cb, message, value, max_value):
"""
:type cb: cursebox.Cursebox
"""
m_x = cb.width // 2
m_y = cb.height // 2
w = len(message) + 4
h = 3
draw_box(cb, m_x - w // 2, m_y - 1, w, h)
message = " %s " % message
i = int((value / max_value) * (len(message) + 2))
message = "$" + message[:i] + "$" + message[i:]
draw_text(cb, m_x - w // 2 + 1, m_y, message) | def function[draw_progress_bar, parameter[cb, message, value, max_value]]:
constant[
:type cb: cursebox.Cursebox
]
variable[m_x] assign[=] binary_operation[name[cb].width <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[m_y] assign[=] binary_operation[name[cb].height <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[w] assign[=] binary_operation[call[name[len], parameter[name[message]]] + constant[4]]
variable[h] assign[=] constant[3]
call[name[draw_box], parameter[name[cb], binary_operation[name[m_x] - binary_operation[name[w] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]], binary_operation[name[m_y] - constant[1]], name[w], name[h]]]
variable[message] assign[=] binary_operation[constant[ %s ] <ast.Mod object at 0x7da2590d6920> name[message]]
variable[i] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[value] / name[max_value]] * binary_operation[call[name[len], parameter[name[message]]] + constant[2]]]]]
variable[message] assign[=] binary_operation[binary_operation[binary_operation[constant[$] + call[name[message]][<ast.Slice object at 0x7da1b05bd360>]] + constant[$]] + call[name[message]][<ast.Slice object at 0x7da1b05bf340>]]
call[name[draw_text], parameter[name[cb], binary_operation[binary_operation[name[m_x] - binary_operation[name[w] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]] + constant[1]], name[m_y], name[message]]] | keyword[def] identifier[draw_progress_bar] ( identifier[cb] , identifier[message] , identifier[value] , identifier[max_value] ):
literal[string]
identifier[m_x] = identifier[cb] . identifier[width] // literal[int]
identifier[m_y] = identifier[cb] . identifier[height] // literal[int]
identifier[w] = identifier[len] ( identifier[message] )+ literal[int]
identifier[h] = literal[int]
identifier[draw_box] ( identifier[cb] , identifier[m_x] - identifier[w] // literal[int] , identifier[m_y] - literal[int] , identifier[w] , identifier[h] )
identifier[message] = literal[string] % identifier[message]
identifier[i] = identifier[int] (( identifier[value] / identifier[max_value] )*( identifier[len] ( identifier[message] )+ literal[int] ))
identifier[message] = literal[string] + identifier[message] [: identifier[i] ]+ literal[string] + identifier[message] [ identifier[i] :]
identifier[draw_text] ( identifier[cb] , identifier[m_x] - identifier[w] // literal[int] + literal[int] , identifier[m_y] , identifier[message] ) | def draw_progress_bar(cb, message, value, max_value):
"""
:type cb: cursebox.Cursebox
"""
m_x = cb.width // 2
m_y = cb.height // 2
w = len(message) + 4
h = 3
draw_box(cb, m_x - w // 2, m_y - 1, w, h)
message = ' %s ' % message
i = int(value / max_value * (len(message) + 2))
message = '$' + message[:i] + '$' + message[i:]
draw_text(cb, m_x - w // 2 + 1, m_y, message) |
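The `$` markers bracket the filled portion of the bar (presumably toggling an attribute inside `draw_text`); the split math can be checked in isolation:

```python
def split_progress(message, value, max_value):
    # Same marker math as draw_progress_bar: pad the message, then place
    # the second '$' at the fraction of the padded width that is "done".
    message = " %s " % message
    i = int((value / max_value) * (len(message) + 2))
    return "$" + message[:i] + "$" + message[i:]

assert split_progress("Loading", 1, 2) == "$ Load$ing "
```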
def write_Text_into_file( text, old_file_name, out_dir, suffix='__split', verbose=True ):
''' Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file.
'''
name = os.path.basename( old_file_name )
if '.' in name:
new_name = re.sub(r'\.([^.]+)$', suffix + r'.\1', name)
else:
new_name = name + suffix
new_path = os.path.join( out_dir, new_name )
start = timer()
#write_document( text, new_path ) # <--- this leaves indent=2 - takes too much extra space ...
o_f = codecs.open( new_path, mode='wb', encoding='ascii' )
o_f.write( json.dumps( text ) )
o_f.close()
end = timer()
timestamp = format_time( end-start )
if verbose:
print(' ==> '+new_path+' (file writing time: '+timestamp+')' ) | def function[write_Text_into_file, parameter[text, old_file_name, out_dir, suffix, verbose]]:
constant[ Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file.
]
variable[name] assign[=] call[name[os].path.basename, parameter[name[old_file_name]]]
if compare[constant[.] in name[name]] begin[:]
variable[new_name] assign[=] call[name[re].sub, parameter[constant[\.([^.]+)$], binary_operation[name[suffix] + constant[.\1]], name[name]]]
variable[new_path] assign[=] call[name[os].path.join, parameter[name[out_dir], name[new_name]]]
variable[start] assign[=] call[name[timer], parameter[]]
variable[o_f] assign[=] call[name[codecs].open, parameter[name[new_path]]]
call[name[o_f].write, parameter[call[name[json].dumps, parameter[name[text]]]]]
call[name[o_f].close, parameter[]]
variable[end] assign[=] call[name[timer], parameter[]]
variable[timestamp] assign[=] call[name[format_time], parameter[binary_operation[name[end] - name[start]]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[ ==> ] + name[new_path]] + constant[ (file writing time: ]] + name[timestamp]] + constant[)]]]] | keyword[def] identifier[write_Text_into_file] ( identifier[text] , identifier[old_file_name] , identifier[out_dir] , identifier[suffix] = literal[string] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[old_file_name] )
keyword[if] literal[string] keyword[in] identifier[name] :
identifier[new_name] = identifier[re] . identifier[sub] ( literal[string] , identifier[suffix] + literal[string] , identifier[name] )
keyword[else] :
identifier[new_name] = identifier[name] + identifier[suffix]
identifier[new_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , identifier[new_name] )
identifier[start] = identifier[timer] ()
identifier[o_f] = identifier[codecs] . identifier[open] ( identifier[new_path] , identifier[mode] = literal[string] , identifier[encoding] = literal[string] )
identifier[o_f] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[text] ))
identifier[o_f] . identifier[close] ()
identifier[end] = identifier[timer] ()
identifier[timestamp] = identifier[format_time] ( identifier[end] - identifier[start] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] + identifier[new_path] + literal[string] + identifier[timestamp] + literal[string] ) | def write_Text_into_file(text, old_file_name, out_dir, suffix='__split', verbose=True):
""" Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file.
"""
name = os.path.basename(old_file_name)
if '.' in name:
new_name = re.sub('\\.([^.]+)$', suffix + '.\\1', name) # depends on [control=['if'], data=['name']]
else:
new_name = name + suffix
new_path = os.path.join(out_dir, new_name)
start = timer()
#write_document( text, new_path ) # <--- this leaves indent=2 - takes too much extra space ...
o_f = codecs.open(new_path, mode='wb', encoding='ascii')
o_f.write(json.dumps(text))
o_f.close()
end = timer()
timestamp = format_time(end - start)
if verbose:
print(' ==> ' + new_path + ' (file writing time: ' + timestamp + ')') # depends on [control=['if'], data=[]] |
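The filename logic (insert the suffix before the last extension, or append it when there is none) can be exercised on its own:

```python
import os
import re

def suffixed_name(old_file_name, suffix='__split'):
    # Same renaming rule as write_Text_into_file.
    name = os.path.basename(old_file_name)
    if '.' in name:
        return re.sub(r'\.([^.]+)$', suffix + r'.\1', name)
    return name + suffix

assert suffixed_name('corpus/doc.json') == 'doc__split.json'
assert suffixed_name('corpus/README') == 'README__split'
```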
def t_INCLUDE(self, t):
'include'
#
# Now eat up the next two tokens which must be
# 1 - the name of the include file, and
# 2 - a terminating semicolon
#
# Then push the current lexer onto the stack, create a new one from
# the include file, and push it onto the stack.
#
# When we hit eof (the t_eof) rule, we pop.
next_token = self.lexer.token()
lineno = next_token.lineno
# print('NEXT', next, "next.value", next.value, type(next))
if isinstance(next_token.value, str):
incfile = next_token.value.strip('"')
else:
raise QasmError("Invalid include: must be a quoted string.")
if incfile in CORE_LIBS:
incfile = os.path.join(CORE_LIBS_PATH, incfile)
next_token = self.lexer.token()
if next_token is None or next_token.value != ';':
raise QasmError('Invalid syntax, missing ";" at line', str(lineno))
if not os.path.exists(incfile):
raise QasmError(
'Include file %s cannot be found, line %s, file %s' %
(incfile, str(next_token.lineno), self.filename))
self.push(incfile)
return self.lexer.token() | def function[t_INCLUDE, parameter[self, t]]:
constant[include]
variable[next_token] assign[=] call[name[self].lexer.token, parameter[]]
variable[lineno] assign[=] name[next_token].lineno
if call[name[isinstance], parameter[name[next_token].value, name[str]]] begin[:]
variable[incfile] assign[=] call[name[next_token].value.strip, parameter[constant["]]]
if compare[name[incfile] in name[CORE_LIBS]] begin[:]
variable[incfile] assign[=] call[name[os].path.join, parameter[name[CORE_LIBS_PATH], name[incfile]]]
variable[next_token] assign[=] call[name[self].lexer.token, parameter[]]
if <ast.BoolOp object at 0x7da1b03a8be0> begin[:]
<ast.Raise object at 0x7da1b03a8d60>
if <ast.UnaryOp object at 0x7da1b03a8ee0> begin[:]
<ast.Raise object at 0x7da1b03a9e70>
call[name[self].push, parameter[name[incfile]]]
return[call[name[self].lexer.token, parameter[]]] | keyword[def] identifier[t_INCLUDE] ( identifier[self] , identifier[t] ):
literal[string]
identifier[next_token] = identifier[self] . identifier[lexer] . identifier[token] ()
identifier[lineno] = identifier[next_token] . identifier[lineno]
keyword[if] identifier[isinstance] ( identifier[next_token] . identifier[value] , identifier[str] ):
identifier[incfile] = identifier[next_token] . identifier[value] . identifier[strip] ( literal[string] )
keyword[else] :
keyword[raise] identifier[QasmError] ( literal[string] )
keyword[if] identifier[incfile] keyword[in] identifier[CORE_LIBS] :
identifier[incfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[CORE_LIBS_PATH] , identifier[incfile] )
identifier[next_token] = identifier[self] . identifier[lexer] . identifier[token] ()
keyword[if] identifier[next_token] keyword[is] keyword[None] keyword[or] identifier[next_token] . identifier[value] != literal[string] :
keyword[raise] identifier[QasmError] ( literal[string] , identifier[str] ( identifier[lineno] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[incfile] ):
keyword[raise] identifier[QasmError] (
literal[string] %
( identifier[incfile] , identifier[str] ( identifier[next_token] . identifier[lineno] ), identifier[self] . identifier[filename] ))
identifier[self] . identifier[push] ( identifier[incfile] )
keyword[return] identifier[self] . identifier[lexer] . identifier[token] () | def t_INCLUDE(self, t):
"""include"""
#
# Now eat up the next two tokens which must be
# 1 - the name of the include file, and
# 2 - a terminating semicolon
#
# Then push the current lexer onto the stack, create a new one from
# the include file, and push it onto the stack.
#
# When we hit eof (the t_eof) rule, we pop.
next_token = self.lexer.token()
lineno = next_token.lineno
# print('NEXT', next, "next.value", next.value, type(next))
if isinstance(next_token.value, str):
incfile = next_token.value.strip('"') # depends on [control=['if'], data=[]]
else:
raise QasmError('Invalid include: must be a quoted string.')
if incfile in CORE_LIBS:
incfile = os.path.join(CORE_LIBS_PATH, incfile) # depends on [control=['if'], data=['incfile']]
next_token = self.lexer.token()
if next_token is None or next_token.value != ';':
raise QasmError('Invalid syntax, missing ";" at line', str(lineno)) # depends on [control=['if'], data=[]]
if not os.path.exists(incfile):
raise QasmError('Include file %s cannot be found, line %s, file %s' % (incfile, str(next_token.lineno), self.filename)) # depends on [control=['if'], data=[]]
self.push(incfile)
return self.lexer.token() |
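`t_INCLUDE` assumes a `push()` that swaps in a new lexer for the include file, with EOF popping back to the including file. A hedged sketch of what such a pair could look like; ply's `Lexer.clone()` and `input()` are real APIs, but the surrounding class here is an assumption and the actual qiskit implementation may differ:

```python
class LexerStack:
    """Hedged sketch of the push/pop behavior t_INCLUDE relies on."""

    def __init__(self, lexer, filename):
        self.lexer, self.filename, self.stack = lexer, filename, []

    def push(self, filename):
        # Save the current lexer, then start lexing the include file.
        self.stack.append((self.lexer, self.filename))
        self.filename = filename
        with open(filename) as handle:
            data = handle.read()
        self.lexer = self.lexer.clone()  # ply lexers support clone()
        self.lexer.input(data)

    def pop(self):
        # Called from the EOF rule to resume the including file.
        self.lexer, self.filename = self.stack.pop()
```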
def get_iso_time(date_part, time_part):
r"""Combign date and time into an iso datetime."""
str_date = datetime.datetime.strptime(
date_part, '%m/%d/%Y').strftime('%Y-%m-%d')
str_time = datetime.datetime.strptime(
time_part, '%I:%M %p').strftime('%H:%M:%S')
return str_date + "T" + str_time + "-07:00"
constant[Combine date and time into an ISO datetime.]
variable[str_date] assign[=] call[call[name[datetime].datetime.strptime, parameter[name[date_part], constant[%m/%d/%Y]]].strftime, parameter[constant[%Y-%m-%d]]]
variable[str_time] assign[=] call[call[name[datetime].datetime.strptime, parameter[name[time_part], constant[%I:%M %p]]].strftime, parameter[constant[%H:%M:%S]]]
return[binary_operation[binary_operation[binary_operation[name[str_date] + constant[T]] + name[str_time]] + constant[-07:00]]]
literal[string]
identifier[str_date] = identifier[datetime] . identifier[datetime] . identifier[strptime] (
identifier[date_part] , literal[string] ). identifier[strftime] ( literal[string] )
identifier[str_time] = identifier[datetime] . identifier[datetime] . identifier[strptime] (
identifier[time_part] , literal[string] ). identifier[strftime] ( literal[string] )
keyword[return] identifier[str_date] + literal[string] + identifier[str_time] + literal[string] | def get_iso_time(date_part, time_part):
"""Combign date and time into an iso datetime."""
str_date = datetime.datetime.strptime(date_part, '%m/%d/%Y').strftime('%Y-%m-%d')
str_time = datetime.datetime.strptime(time_part, '%I:%M %p').strftime('%H:%M:%S')
return str_date + 'T' + str_time + '-07:00' |
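A quick check of the combined format, using the corrected two-digit offset above (note that `strptime` also tolerates a non-zero-padded hour for `%I`):

```python
# With get_iso_time from the row above in scope:
assert get_iso_time('07/04/2019', '09:30 AM') == '2019-07-04T09:30:00-07:00'
```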
def _do_tcp_check(self, ip, results):
"""
Attempt to establish a TCP connection.
If not successful, record the IP in the results list.
Always closes the connection at the end.
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.connect((ip, self.conf['tcp_check_port']))
except:
# Any problem during the connection attempt? We won't diagnose it,
# we just indicate failure by adding the IP to the list
results.append(ip)
finally:
sock.close() | def function[_do_tcp_check, parameter[self, ip, results]]:
constant[
Attempt to establish a TCP connection.
If not successful, record the IP in the results list.
Always closes the connection at the end.
]
<ast.Try object at 0x7da18c4ce050> | keyword[def] identifier[_do_tcp_check] ( identifier[self] , identifier[ip] , identifier[results] ):
literal[string]
keyword[try] :
identifier[sock] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_STREAM] )
identifier[sock] . identifier[settimeout] ( literal[int] )
identifier[sock] . identifier[connect] (( identifier[ip] , identifier[self] . identifier[conf] [ literal[string] ]))
keyword[except] :
identifier[results] . identifier[append] ( identifier[ip] )
keyword[finally] :
identifier[sock] . identifier[close] () | def _do_tcp_check(self, ip, results):
"""
Attempt to establish a TCP connection.
If not successful, record the IP in the results list.
Always closes the connection at the end.
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.connect((ip, self.conf['tcp_check_port'])) # depends on [control=['try'], data=[]]
except:
# Any problem during the connection attempt? We won't diagnose it,
# we just indicate failure by adding the IP to the list
results.append(ip) # depends on [control=['except'], data=[]]
finally:
sock.close() |
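The shared `results` argument suggests the checks are fanned out across threads. A self-contained driver sketch of that pattern; the port and IPs are placeholders, and `list.append` is atomic under CPython's GIL, so the shared list needs no lock:

```python
import socket
import threading

def tcp_check(ip, port, results):
    # Standalone twin of _do_tcp_check: record IPs that refuse a connection.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    try:
        sock.connect((ip, port))
    except OSError:
        results.append(ip)
    finally:
        sock.close()

failed = []
threads = [threading.Thread(target=tcp_check, args=(ip, 22, failed))
           for ip in ('192.0.2.1', '192.0.2.2')]  # TEST-NET placeholder IPs
for t in threads:
    t.start()
for t in threads:
    t.join()
print('unreachable:', failed)
```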
def center(self):
'''
Point whose coordinates are (midX,midY,origin.z), Point.
'''
return Point(self.midX, self.midY, self.origin.z) | def function[center, parameter[self]]:
constant[
Point whose coordinates are (midX,midY,origin.z), Point.
]
return[call[name[Point], parameter[name[self].midX, name[self].midY, name[self].origin.z]]] | keyword[def] identifier[center] ( identifier[self] ):
literal[string]
keyword[return] identifier[Point] ( identifier[self] . identifier[midX] , identifier[self] . identifier[midY] , identifier[self] . identifier[origin] . identifier[z] ) | def center(self):
"""
Point whose coordinates are (midX,midY,origin.z), Point.
"""
return Point(self.midX, self.midY, self.origin.z) |
def translate_row_col_to_index(self, row, col):
"""
Given a (row, col) tuple, return the corresponding index.
(Row and col params are 0-based.)
Negative row/col values are turned into zero.
"""
try:
result = self._line_start_indexes[row]
line = self.lines[row]
except IndexError:
if row < 0:
result = self._line_start_indexes[0]
line = self.lines[0]
else:
result = self._line_start_indexes[-1]
line = self.lines[-1]
result += max(0, min(col, len(line)))
# Keep in range. (len(self.text) is included, because the cursor can be
# right after the end of the text as well.)
result = max(0, min(result, len(self.text)))
return result | def function[translate_row_col_to_index, parameter[self, row, col]]:
constant[
Given a (row, col) tuple, return the corresponding index.
(Row and col params are 0-based.)
Negative row/col values are turned into zero.
]
<ast.Try object at 0x7da18f00c9a0>
<ast.AugAssign object at 0x7da18f58e680>
variable[result] assign[=] call[name[max], parameter[constant[0], call[name[min], parameter[name[result], call[name[len], parameter[name[self].text]]]]]]
return[name[result]] | keyword[def] identifier[translate_row_col_to_index] ( identifier[self] , identifier[row] , identifier[col] ):
literal[string]
keyword[try] :
identifier[result] = identifier[self] . identifier[_line_start_indexes] [ identifier[row] ]
identifier[line] = identifier[self] . identifier[lines] [ identifier[row] ]
keyword[except] identifier[IndexError] :
keyword[if] identifier[row] < literal[int] :
identifier[result] = identifier[self] . identifier[_line_start_indexes] [ literal[int] ]
identifier[line] = identifier[self] . identifier[lines] [ literal[int] ]
keyword[else] :
identifier[result] = identifier[self] . identifier[_line_start_indexes] [- literal[int] ]
identifier[line] = identifier[self] . identifier[lines] [- literal[int] ]
identifier[result] += identifier[max] ( literal[int] , identifier[min] ( identifier[col] , identifier[len] ( identifier[line] )))
identifier[result] = identifier[max] ( literal[int] , identifier[min] ( identifier[result] , identifier[len] ( identifier[self] . identifier[text] )))
keyword[return] identifier[result] | def translate_row_col_to_index(self, row, col):
"""
Given a (row, col) tuple, return the corresponding index.
(Row and col params are 0-based.)
Negative row/col values are turned into zero.
"""
try:
result = self._line_start_indexes[row]
line = self.lines[row] # depends on [control=['try'], data=[]]
except IndexError:
if row < 0:
result = self._line_start_indexes[0]
line = self.lines[0] # depends on [control=['if'], data=[]]
else:
result = self._line_start_indexes[-1]
line = self.lines[-1] # depends on [control=['except'], data=[]]
result += max(0, min(col, len(line)))
# Keep in range. (len(self.text) is included, because the cursor can be
# right after the end of the text as well.)
result = max(0, min(result, len(self.text)))
return result |
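The clamping behavior is easy to verify with flat stand-ins for the document state (`text`, `lines`, and `line_start_indexes` below are local stand-ins, not prompt_toolkit's real attributes):

```python
text = "abc\nde"
lines = text.split("\n")
line_start_indexes = [0, 4]            # offset where each line starts

row, col = 1, 10                       # col far past the end of line 1
index = line_start_indexes[row] + max(0, min(col, len(lines[row])))
index = max(0, min(index, len(text)))  # cursor may sit just past the text
assert index == 6
```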
def percent_records_missing_location(user, method=None):
"""
Return the percentage of records missing a location parameter.
"""
if len(user.records) == 0:
return 0.
missing_locations = sum([1 for record in user.records if record.position._get_location(user) is None])
return float(missing_locations) / len(user.records) | def function[percent_records_missing_location, parameter[user, method]]:
constant[
Return the percentage of records missing a location parameter.
]
if compare[call[name[len], parameter[name[user].records]] equal[==] constant[0]] begin[:]
return[constant[0.0]]
variable[missing_locations] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b0d3d4e0>]]
return[binary_operation[call[name[float], parameter[name[missing_locations]]] / call[name[len], parameter[name[user].records]]]] | keyword[def] identifier[percent_records_missing_location] ( identifier[user] , identifier[method] = keyword[None] ):
literal[string]
keyword[if] identifier[len] ( identifier[user] . identifier[records] )== literal[int] :
keyword[return] literal[int]
identifier[missing_locations] = identifier[sum] ([ literal[int] keyword[for] identifier[record] keyword[in] identifier[user] . identifier[records] keyword[if] identifier[record] . identifier[position] . identifier[_get_location] ( identifier[user] ) keyword[is] keyword[None] ])
keyword[return] identifier[float] ( identifier[missing_locations] )/ identifier[len] ( identifier[user] . identifier[records] ) | def percent_records_missing_location(user, method=None):
"""
Return the percentage of records missing a location parameter.
"""
if len(user.records) == 0:
return 0.0 # depends on [control=['if'], data=[]]
missing_locations = sum([1 for record in user.records if record.position._get_location(user) is None])
return float(missing_locations) / len(user.records) |
def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
"""
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
        A dispatcher with two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
"""
# Get real paths.
nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]
# Define an empty dispatcher.
sub_dsp = self.copy_structure(
dmap=self.dmap.subgraph(nodes_bunch).copy()
)
# Namespace shortcuts for speed.
nodes, dmap_out_degree = sub_dsp.nodes, sub_dsp.dmap.out_degree
dmap_dv, dmap_rm_edge = self.default_values, sub_dsp.dmap.remove_edge
dmap_rm_node = sub_dsp.dmap.remove_node
        # Remove function nodes that do not have all of their inputs available.
for u in nodes_bunch:
n = nodes[u].get('inputs', None) # Function inputs.
            # Not all inputs are available.
if n is not None and not set(n).issubset(nodes_bunch):
dmap_rm_node(u) # Remove function node.
# Remove edges that are not in edges_bunch.
if edges_bunch is not None:
for e in edges_bunch: # Iterate sub-graph edges.
dmap_rm_edge(*e) # Remove edge.
        # Remove function nodes with no outputs.
for u in [u for u, n in sub_dsp.dmap.nodes.items()
if n['type'] == 'function']:
# noinspection PyCallingNonCallable
if not dmap_out_degree(u): # No outputs.
dmap_rm_node(u) # Remove function node.
from networkx import isolates
        # Remove isolated nodes from the sub-graph.
sub_dsp.dmap.remove_nodes_from(list(isolates(sub_dsp.dmap)))
# Set default values.
sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}
return sub_dsp | def function[get_sub_dsp, parameter[self, nodes_bunch, edges_bunch]]:
constant[
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
        A dispatcher with two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
]
variable[nodes_bunch] assign[=] <ast.ListComp object at 0x7da20c6aba30>
variable[sub_dsp] assign[=] call[name[self].copy_structure, parameter[]]
<ast.Tuple object at 0x7da1b25d3970> assign[=] tuple[[<ast.Attribute object at 0x7da1b25d1f90>, <ast.Attribute object at 0x7da1b25d29b0>]]
<ast.Tuple object at 0x7da1b25d1240> assign[=] tuple[[<ast.Attribute object at 0x7da1b25d37f0>, <ast.Attribute object at 0x7da1b25d1c30>]]
variable[dmap_rm_node] assign[=] name[sub_dsp].dmap.remove_node
for taget[name[u]] in starred[name[nodes_bunch]] begin[:]
variable[n] assign[=] call[call[name[nodes]][name[u]].get, parameter[constant[inputs], constant[None]]]
if <ast.BoolOp object at 0x7da1b25d0040> begin[:]
call[name[dmap_rm_node], parameter[name[u]]]
if compare[name[edges_bunch] is_not constant[None]] begin[:]
for taget[name[e]] in starred[name[edges_bunch]] begin[:]
call[name[dmap_rm_edge], parameter[<ast.Starred object at 0x7da1b25d24d0>]]
for taget[name[u]] in starred[<ast.ListComp object at 0x7da1b25d2350>] begin[:]
if <ast.UnaryOp object at 0x7da20c6e5870> begin[:]
call[name[dmap_rm_node], parameter[name[u]]]
from relative_module[networkx] import module[isolates]
call[name[sub_dsp].dmap.remove_nodes_from, parameter[call[name[list], parameter[call[name[isolates], parameter[name[sub_dsp].dmap]]]]]]
name[sub_dsp].default_values assign[=] <ast.DictComp object at 0x7da20c6e64a0>
return[name[sub_dsp]] | keyword[def] identifier[get_sub_dsp] ( identifier[self] , identifier[nodes_bunch] , identifier[edges_bunch] = keyword[None] ):
literal[string]
identifier[nodes_bunch] =[ identifier[self] . identifier[get_node] ( identifier[u] )[ literal[int] ][ literal[int] ] keyword[for] identifier[u] keyword[in] identifier[nodes_bunch] ]
identifier[sub_dsp] = identifier[self] . identifier[copy_structure] (
identifier[dmap] = identifier[self] . identifier[dmap] . identifier[subgraph] ( identifier[nodes_bunch] ). identifier[copy] ()
)
identifier[nodes] , identifier[dmap_out_degree] = identifier[sub_dsp] . identifier[nodes] , identifier[sub_dsp] . identifier[dmap] . identifier[out_degree]
identifier[dmap_dv] , identifier[dmap_rm_edge] = identifier[self] . identifier[default_values] , identifier[sub_dsp] . identifier[dmap] . identifier[remove_edge]
identifier[dmap_rm_node] = identifier[sub_dsp] . identifier[dmap] . identifier[remove_node]
keyword[for] identifier[u] keyword[in] identifier[nodes_bunch] :
identifier[n] = identifier[nodes] [ identifier[u] ]. identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[n] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[set] ( identifier[n] ). identifier[issubset] ( identifier[nodes_bunch] ):
identifier[dmap_rm_node] ( identifier[u] )
keyword[if] identifier[edges_bunch] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[e] keyword[in] identifier[edges_bunch] :
identifier[dmap_rm_edge] (* identifier[e] )
keyword[for] identifier[u] keyword[in] [ identifier[u] keyword[for] identifier[u] , identifier[n] keyword[in] identifier[sub_dsp] . identifier[dmap] . identifier[nodes] . identifier[items] ()
keyword[if] identifier[n] [ literal[string] ]== literal[string] ]:
keyword[if] keyword[not] identifier[dmap_out_degree] ( identifier[u] ):
identifier[dmap_rm_node] ( identifier[u] )
keyword[from] identifier[networkx] keyword[import] identifier[isolates]
identifier[sub_dsp] . identifier[dmap] . identifier[remove_nodes_from] ( identifier[list] ( identifier[isolates] ( identifier[sub_dsp] . identifier[dmap] )))
identifier[sub_dsp] . identifier[default_values] ={ identifier[k] : identifier[dmap_dv] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[dmap_dv] keyword[if] identifier[k] keyword[in] identifier[nodes] }
keyword[return] identifier[sub_dsp] | def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
"""
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
"""
# Get real paths.
nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]
# Define an empty dispatcher.
sub_dsp = self.copy_structure(dmap=self.dmap.subgraph(nodes_bunch).copy())
# Namespace shortcuts for speed.
(nodes, dmap_out_degree) = (sub_dsp.nodes, sub_dsp.dmap.out_degree)
(dmap_dv, dmap_rm_edge) = (self.default_values, sub_dsp.dmap.remove_edge)
dmap_rm_node = sub_dsp.dmap.remove_node
        # Remove function nodes that do not have all of their inputs available.
for u in nodes_bunch:
n = nodes[u].get('inputs', None) # Function inputs.
            # Not all inputs are available.
if n is not None and (not set(n).issubset(nodes_bunch)):
dmap_rm_node(u) # Remove function node. # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['u']]
# Remove edges that are not in edges_bunch.
if edges_bunch is not None:
for e in edges_bunch: # Iterate sub-graph edges.
dmap_rm_edge(*e) # Remove edge. # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=['edges_bunch']]
        # Remove function nodes with no outputs.
for u in [u for (u, n) in sub_dsp.dmap.nodes.items() if n['type'] == 'function']:
# noinspection PyCallingNonCallable
if not dmap_out_degree(u): # No outputs.
dmap_rm_node(u) # Remove function node. # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['u']]
from networkx import isolates
        # Remove isolated nodes from the sub-graph.
sub_dsp.dmap.remove_nodes_from(list(isolates(sub_dsp.dmap)))
# Set default values.
sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}
return sub_dsp |
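# Stand-alone sketch of the same pruning steps on a plain networkx
# DiGraph (no Dispatcher machinery): take the induced subgraph, then
# drop isolated survivors, as get_sub_dsp does for data nodes.
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([('a', 'fun1'), ('b', 'fun1'), ('fun1', 'c'),
                  ('a', 'fun2'), ('d', 'fun2'), ('fun2', 'e')])
sub = g.subgraph(['a', 'c', 'd', 'e', 'fun2']).copy()
sub.remove_nodes_from(list(nx.isolates(sub)))  # 'c' ends up isolated
print(sorted(sub.nodes))  # ['a', 'd', 'e', 'fun2']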
def onAIOCompletion(self):
"""
Call when eventfd notified events are available.
"""
event_count = self.eventfd.read()
trace('eventfd reports %i events' % event_count)
        # Even though eventfd signaled activity and may report a number of
        # pending events, some of those events appear to have been processed
        # already (possibly during an io_cancel call). So do not trust the
        # eventfd value, and do not even assume that there is at least one
        # event left to process.
self._aio_context.getEvents(0) | def function[onAIOCompletion, parameter[self]]:
constant[
Call when eventfd notified events are available.
]
variable[event_count] assign[=] call[name[self].eventfd.read, parameter[]]
call[name[trace], parameter[binary_operation[constant[eventfd reports %i events] <ast.Mod object at 0x7da2590d6920> name[event_count]]]]
call[name[self]._aio_context.getEvents, parameter[constant[0]]] | keyword[def] identifier[onAIOCompletion] ( identifier[self] ):
literal[string]
identifier[event_count] = identifier[self] . identifier[eventfd] . identifier[read] ()
identifier[trace] ( literal[string] % identifier[event_count] )
identifier[self] . identifier[_aio_context] . identifier[getEvents] ( literal[int] ) | def onAIOCompletion(self):
"""
Call when eventfd notified events are available.
"""
event_count = self.eventfd.read()
trace('eventfd reports %i events' % event_count)
        # Even though eventfd signaled activity and may report a number of
        # pending events, some of those events appear to have been processed
        # already (possibly during an io_cancel call). So do not trust the
        # eventfd value, and do not even assume that there is at least one
        # event left to process.
self._aio_context.getEvents(0) |
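# Sketch of the eventfd counter semantics the method above relies on
# (Linux only; os.eventfd needs Python >= 3.10). A read drains the
# whole counter, which is why its value is advisory at best.
import os

fd = os.eventfd(0)
os.eventfd_write(fd, 2)     # two completions signaled
os.eventfd_write(fd, 3)     # three more
print(os.eventfd_read(fd))  # 5; the counter resets to zero
os.close(fd)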
def daterange_end(value):
'''Parse a date range end boundary'''
if not value:
return None
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
result = parse_dt(value).date()
dashes = value.count('-')
if dashes >= 2:
# Full date
return result
elif dashes == 1:
# Year/Month
return result + relativedelta(months=+1, days=-1, day=1)
else:
# Year only
return result.replace(month=12, day=31) | def function[daterange_end, parameter[value]]:
constant[Parse a date range end boundary]
if <ast.UnaryOp object at 0x7da18f09f220> begin[:]
return[constant[None]]
variable[result] assign[=] call[call[name[parse_dt], parameter[name[value]]].date, parameter[]]
variable[dashes] assign[=] call[name[value].count, parameter[constant[-]]]
if compare[name[dashes] greater_or_equal[>=] constant[2]] begin[:]
return[name[result]] | keyword[def] identifier[daterange_end] ( identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return] keyword[None]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[datetime] ):
keyword[return] identifier[value] . identifier[date] ()
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[date] ):
keyword[return] identifier[value]
identifier[result] = identifier[parse_dt] ( identifier[value] ). identifier[date] ()
identifier[dashes] = identifier[value] . identifier[count] ( literal[string] )
keyword[if] identifier[dashes] >= literal[int] :
keyword[return] identifier[result]
keyword[elif] identifier[dashes] == literal[int] :
keyword[return] identifier[result] + identifier[relativedelta] ( identifier[months] =+ literal[int] , identifier[days] =- literal[int] , identifier[day] = literal[int] )
keyword[else] :
keyword[return] identifier[result] . identifier[replace] ( identifier[month] = literal[int] , identifier[day] = literal[int] ) | def daterange_end(value):
"""Parse a date range end boundary"""
if not value:
return None # depends on [control=['if'], data=[]]
elif isinstance(value, datetime):
return value.date() # depends on [control=['if'], data=[]]
elif isinstance(value, date):
return value # depends on [control=['if'], data=[]]
result = parse_dt(value).date()
dashes = value.count('-')
if dashes >= 2:
# Full date
return result # depends on [control=['if'], data=[]]
elif dashes == 1:
# Year/Month
return result + relativedelta(months=+1, days=-1, day=1) # depends on [control=['if'], data=[]]
else:
# Year only
return result.replace(month=12, day=31) |
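# Expected behaviour, assuming parse_dt is dateutil.parser.parse and
# relativedelta is dateutil's. Note dateutil fills fields missing from
# a partial date with today's values before the adjustments above run.
print(daterange_end('2014'))        # 2014-12-31 (year -> Dec 31)
print(daterange_end('2014-06'))     # 2014-06-30 (month -> its last day)
print(daterange_end('2014-06-15'))  # 2014-06-15 (full date unchanged)
print(daterange_end(''))            # None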
def fetch_libzmq(savedir):
"""download and extract libzmq"""
dest = pjoin(savedir, 'zeromq')
if os.path.exists(dest):
info("already have %s" % dest)
return
path = fetch_archive(savedir, libzmq_url, fname=libzmq, checksum=libzmq_checksum)
tf = tarfile.open(path)
with_version = pjoin(savedir, tf.firstmember.path)
tf.extractall(savedir)
tf.close()
# remove version suffix:
shutil.move(with_version, dest) | def function[fetch_libzmq, parameter[savedir]]:
constant[download and extract libzmq]
variable[dest] assign[=] call[name[pjoin], parameter[name[savedir], constant[zeromq]]]
if call[name[os].path.exists, parameter[name[dest]]] begin[:]
call[name[info], parameter[binary_operation[constant[already have %s] <ast.Mod object at 0x7da2590d6920> name[dest]]]]
return[None]
variable[path] assign[=] call[name[fetch_archive], parameter[name[savedir], name[libzmq_url]]]
variable[tf] assign[=] call[name[tarfile].open, parameter[name[path]]]
variable[with_version] assign[=] call[name[pjoin], parameter[name[savedir], name[tf].firstmember.path]]
call[name[tf].extractall, parameter[name[savedir]]]
call[name[tf].close, parameter[]]
call[name[shutil].move, parameter[name[with_version], name[dest]]] | keyword[def] identifier[fetch_libzmq] ( identifier[savedir] ):
literal[string]
identifier[dest] = identifier[pjoin] ( identifier[savedir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[dest] ):
identifier[info] ( literal[string] % identifier[dest] )
keyword[return]
identifier[path] = identifier[fetch_archive] ( identifier[savedir] , identifier[libzmq_url] , identifier[fname] = identifier[libzmq] , identifier[checksum] = identifier[libzmq_checksum] )
identifier[tf] = identifier[tarfile] . identifier[open] ( identifier[path] )
identifier[with_version] = identifier[pjoin] ( identifier[savedir] , identifier[tf] . identifier[firstmember] . identifier[path] )
identifier[tf] . identifier[extractall] ( identifier[savedir] )
identifier[tf] . identifier[close] ()
identifier[shutil] . identifier[move] ( identifier[with_version] , identifier[dest] ) | def fetch_libzmq(savedir):
"""download and extract libzmq"""
dest = pjoin(savedir, 'zeromq')
if os.path.exists(dest):
info('already have %s' % dest)
return # depends on [control=['if'], data=[]]
path = fetch_archive(savedir, libzmq_url, fname=libzmq, checksum=libzmq_checksum)
tf = tarfile.open(path)
with_version = pjoin(savedir, tf.firstmember.path)
tf.extractall(savedir)
tf.close()
# remove version suffix:
shutil.move(with_version, dest) |
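# A hedged variant of the extraction step: on Python 3.12 (and the
# security backports) tarfile accepts filter='data', which rejects
# absolute paths and links escaping the target directory.
import tarfile

def safe_extract(path, savedir):
    with tarfile.open(path) as tf:
        top = tf.getnames()[0]  # e.g. 'zeromq-4.x.y'
        tf.extractall(savedir, filter='data')
    return top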
def check_for_wdiff():
"""
Checks if the `wdiff` command can be found.
Raises:
WdiffNotFoundError: if ``wdiff`` is not found.
"""
cmd = ['which', CMD_WDIFF]
DEVNULL = open(os.devnull, 'wb')
proc = sub.Popen(cmd, stdout=DEVNULL)
proc.wait()
DEVNULL.close()
if proc.returncode != 0:
msg = "the `{}` command can't be found".format(CMD_WDIFF)
raise WdiffNotFoundError(msg) | def function[check_for_wdiff, parameter[]]:
constant[
Checks if the `wdiff` command can be found.
Raises:
WdiffNotFoundError: if ``wdiff`` is not found.
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b23345b0>, <ast.Name object at 0x7da1b23349d0>]]
variable[DEVNULL] assign[=] call[name[open], parameter[name[os].devnull, constant[wb]]]
variable[proc] assign[=] call[name[sub].Popen, parameter[name[cmd]]]
call[name[proc].wait, parameter[]]
call[name[DEVNULL].close, parameter[]]
if compare[name[proc].returncode not_equal[!=] constant[0]] begin[:]
variable[msg] assign[=] call[constant[the `{}` command can't be found].format, parameter[name[CMD_WDIFF]]]
<ast.Raise object at 0x7da1b2335ab0> | keyword[def] identifier[check_for_wdiff] ():
literal[string]
identifier[cmd] =[ literal[string] , identifier[CMD_WDIFF] ]
identifier[DEVNULL] = identifier[open] ( identifier[os] . identifier[devnull] , literal[string] )
identifier[proc] = identifier[sub] . identifier[Popen] ( identifier[cmd] , identifier[stdout] = identifier[DEVNULL] )
identifier[proc] . identifier[wait] ()
identifier[DEVNULL] . identifier[close] ()
keyword[if] identifier[proc] . identifier[returncode] != literal[int] :
identifier[msg] = literal[string] . identifier[format] ( identifier[CMD_WDIFF] )
keyword[raise] identifier[WdiffNotFoundError] ( identifier[msg] ) | def check_for_wdiff():
"""
Checks if the `wdiff` command can be found.
Raises:
WdiffNotFoundError: if ``wdiff`` is not found.
"""
cmd = ['which', CMD_WDIFF]
DEVNULL = open(os.devnull, 'wb')
proc = sub.Popen(cmd, stdout=DEVNULL)
proc.wait()
DEVNULL.close()
if proc.returncode != 0:
msg = "the `{}` command can't be found".format(CMD_WDIFF)
raise WdiffNotFoundError(msg) # depends on [control=['if'], data=[]] |
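# An equivalent check without spawning a subprocess; shutil.which
# searches PATH the same way `which` does. CMD_WDIFF and
# WdiffNotFoundError are assumed to come from the module above.
import shutil

def check_for_wdiff_alt():
    if shutil.which(CMD_WDIFF) is None:
        raise WdiffNotFoundError("the `{}` command can't be found".format(CMD_WDIFF))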
def execute_lines(self, lines):
"""Execute lines and give focus to shell"""
self.shell.execute_lines(to_text_string(lines))
self.shell.setFocus() | def function[execute_lines, parameter[self, lines]]:
constant[Execute lines and give focus to shell]
call[name[self].shell.execute_lines, parameter[call[name[to_text_string], parameter[name[lines]]]]]
call[name[self].shell.setFocus, parameter[]] | keyword[def] identifier[execute_lines] ( identifier[self] , identifier[lines] ):
literal[string]
identifier[self] . identifier[shell] . identifier[execute_lines] ( identifier[to_text_string] ( identifier[lines] ))
identifier[self] . identifier[shell] . identifier[setFocus] () | def execute_lines(self, lines):
"""Execute lines and give focus to shell"""
self.shell.execute_lines(to_text_string(lines))
self.shell.setFocus() |
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
spec = find_spec(package)
if spec is None:
return None
limit = []
if '.' in package:
package, limit = package.split('.', 1)
limit = [limit]
spec = find_spec(package)
if spec is not None:
if spec.submodule_search_locations:
path = spec.submodule_search_locations[0]
elif spec.origin and spec.origin != 'built-in':
path = spec.origin
else:
return None
return PackageSpec(spec.name, path, limit)
return None | def function[find, parameter[self, package]]:
constant[
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
]
variable[spec] assign[=] call[name[find_spec], parameter[name[package]]]
if compare[name[spec] is constant[None]] begin[:]
return[constant[None]]
variable[limit] assign[=] list[[]]
if compare[constant[.] in name[package]] begin[:]
<ast.Tuple object at 0x7da18bc71c30> assign[=] call[name[package].split, parameter[constant[.], constant[1]]]
variable[limit] assign[=] list[[<ast.Name object at 0x7da18bc734f0>]]
variable[spec] assign[=] call[name[find_spec], parameter[name[package]]]
if compare[name[spec] is_not constant[None]] begin[:]
if name[spec].submodule_search_locations begin[:]
variable[path] assign[=] call[name[spec].submodule_search_locations][constant[0]]
return[call[name[PackageSpec], parameter[name[spec].name, name[path], name[limit]]]]
return[constant[None]] | keyword[def] identifier[find] ( identifier[self] , identifier[package] ,** identifier[kwargs] ):
literal[string]
identifier[spec] = identifier[find_spec] ( identifier[package] )
keyword[if] identifier[spec] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[limit] =[]
keyword[if] literal[string] keyword[in] identifier[package] :
identifier[package] , identifier[limit] = identifier[package] . identifier[split] ( literal[string] , literal[int] )
identifier[limit] =[ identifier[limit] ]
identifier[spec] = identifier[find_spec] ( identifier[package] )
keyword[if] identifier[spec] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[spec] . identifier[submodule_search_locations] :
identifier[path] = identifier[spec] . identifier[submodule_search_locations] [ literal[int] ]
keyword[elif] identifier[spec] . identifier[origin] keyword[and] identifier[spec] . identifier[origin] != literal[string] :
identifier[path] = identifier[spec] . identifier[origin]
keyword[else] :
keyword[return] keyword[None]
keyword[return] identifier[PackageSpec] ( identifier[spec] . identifier[name] , identifier[path] , identifier[limit] )
keyword[return] keyword[None] | def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
spec = find_spec(package)
if spec is None:
return None # depends on [control=['if'], data=[]]
limit = []
if '.' in package:
(package, limit) = package.split('.', 1)
limit = [limit]
spec = find_spec(package) # depends on [control=['if'], data=['package']]
if spec is not None:
if spec.submodule_search_locations:
path = spec.submodule_search_locations[0] # depends on [control=['if'], data=[]]
elif spec.origin and spec.origin != 'built-in':
path = spec.origin # depends on [control=['if'], data=[]]
else:
return None
return PackageSpec(spec.name, path, limit) # depends on [control=['if'], data=['spec']]
return None |
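# Probing importlib the way the finder above does: packages expose
# submodule_search_locations, plain modules only an origin.
from importlib.util import find_spec

pkg = find_spec('json')          # a package
mod = find_spec('json.decoder')  # a submodule
print(pkg.submodule_search_locations is not None)  # True
print(mod.origin.endswith('decoder.py'))           # True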
def half_light_radius_source(self, kwargs_source, center_x=0, center_y=0, deltaPix=None, numPix=None):
"""
        computes numerically the half-light radius of the source light model
        :param kwargs_source: keyword argument list of the source light model
        :return: half-light radius of the source light
"""
if numPix is None:
numPix = 1000
if deltaPix is None:
deltaPix = 0.005
x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
x_grid += center_x
y_grid += center_y
source_light = self.SourceModel.surface_brightness(x_grid, y_grid, kwargs_source)
R_h = analysis_util.half_light_radius(source_light, x_grid, y_grid, center_x=center_x, center_y=center_y)
return R_h | def function[half_light_radius_source, parameter[self, kwargs_source, center_x, center_y, deltaPix, numPix]]:
constant[
        computes numerically the half-light radius of the source light model
        :param kwargs_source: keyword argument list of the source light model
        :return: half-light radius of the source light
]
if compare[name[numPix] is constant[None]] begin[:]
variable[numPix] assign[=] constant[1000]
if compare[name[deltaPix] is constant[None]] begin[:]
variable[deltaPix] assign[=] constant[0.005]
<ast.Tuple object at 0x7da20c6e7fd0> assign[=] call[name[util].make_grid, parameter[]]
<ast.AugAssign object at 0x7da20c6e55a0>
<ast.AugAssign object at 0x7da20c6e7130>
variable[source_light] assign[=] call[name[self].SourceModel.surface_brightness, parameter[name[x_grid], name[y_grid], name[kwargs_source]]]
variable[R_h] assign[=] call[name[analysis_util].half_light_radius, parameter[name[source_light], name[x_grid], name[y_grid]]]
return[name[R_h]] | keyword[def] identifier[half_light_radius_source] ( identifier[self] , identifier[kwargs_source] , identifier[center_x] = literal[int] , identifier[center_y] = literal[int] , identifier[deltaPix] = keyword[None] , identifier[numPix] = keyword[None] ):
literal[string]
keyword[if] identifier[numPix] keyword[is] keyword[None] :
identifier[numPix] = literal[int]
keyword[if] identifier[deltaPix] keyword[is] keyword[None] :
identifier[deltaPix] = literal[int]
identifier[x_grid] , identifier[y_grid] = identifier[util] . identifier[make_grid] ( identifier[numPix] = identifier[numPix] , identifier[deltapix] = identifier[deltaPix] )
identifier[x_grid] += identifier[center_x]
identifier[y_grid] += identifier[center_y]
identifier[source_light] = identifier[self] . identifier[SourceModel] . identifier[surface_brightness] ( identifier[x_grid] , identifier[y_grid] , identifier[kwargs_source] )
identifier[R_h] = identifier[analysis_util] . identifier[half_light_radius] ( identifier[source_light] , identifier[x_grid] , identifier[y_grid] , identifier[center_x] = identifier[center_x] , identifier[center_y] = identifier[center_y] )
keyword[return] identifier[R_h] | def half_light_radius_source(self, kwargs_source, center_x=0, center_y=0, deltaPix=None, numPix=None):
"""
        computes numerically the half-light radius of the source light model
        :param kwargs_source: keyword argument list of the source light model
        :return: half-light radius of the source light
"""
if numPix is None:
numPix = 1000 # depends on [control=['if'], data=['numPix']]
if deltaPix is None:
deltaPix = 0.005 # depends on [control=['if'], data=['deltaPix']]
(x_grid, y_grid) = util.make_grid(numPix=numPix, deltapix=deltaPix)
x_grid += center_x
y_grid += center_y
source_light = self.SourceModel.surface_brightness(x_grid, y_grid, kwargs_source)
R_h = analysis_util.half_light_radius(source_light, x_grid, y_grid, center_x=center_x, center_y=center_y)
return R_h |
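# Numerical illustration of the half-light radius itself: for a
# circular Gaussian it is sigma * sqrt(2 ln 2) ~= 1.1774 sigma.
# analysis_util.half_light_radius is assumed to compute the radius
# enclosing half the total flux in some equivalent way.
import numpy as np

deltaPix, numPix, sigma = 0.005, 1000, 0.2
x = (np.arange(numPix) - numPix / 2.) * deltaPix
xx, yy = np.meshgrid(x, x)
r = np.hypot(xx, yy)
flux = np.exp(-r ** 2 / (2 * sigma ** 2))
order = np.argsort(r, axis=None)
cum = np.cumsum(flux.ravel()[order])
R_h = r.ravel()[order][np.searchsorted(cum, cum[-1] / 2.)]
print(R_h, sigma * np.sqrt(2 * np.log(2)))  # both ~0.2354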
async def send_schema(self, schema_data_json: str) -> str:
"""
Send schema to ledger, then retrieve it as written to the ledger and return it.
        Raise BadLedgerTxn on failure. Raise BadAttribute for an attribute name that
        is malformed (disallowed characters, leading/trailing spaces or hyphens) or
        reserved by indy-sdk ('hash').
If schema already exists on ledger, log error and return schema.
:param schema_data_json: schema data json with name, version, attribute names; e.g.,
::
{
'name': 'my-schema',
'version': '1.234',
'attr_names': ['favourite_drink', 'height', 'last_visit_date']
}
:return: schema json as written to ledger (or existed a priori)
"""
LOGGER.debug('Origin.send_schema >>> schema_data_json: %s', schema_data_json)
schema_data = json.loads(schema_data_json)
for attr in schema_data['attr_names']:
if not (re.match(r'(?=[^- ])[-_a-zA-Z0-9 ]+(?<=[^- ])$', attr)) or attr.strip().lower() == 'hash':
LOGGER.debug('Origin.send_schema <!< Bad attribute name [%s]', attr)
raise BadAttribute('Bad attribute name [{}]'.format(attr))
s_id = schema_id(self.did, schema_data['name'], schema_data['version'])
s_key = schema_key(s_id)
rv_json = None
with SCHEMA_CACHE.lock:
try:
rv_json = await self.get_schema(s_key)
LOGGER.error(
'Schema %s version %s already exists on ledger for origin-did %s: not sending',
schema_data['name'],
schema_data['version'],
self.did)
except AbsentSchema: # OK - about to create and send it
(_, schema_json) = await anoncreds.issuer_create_schema(
self.did,
schema_data['name'],
schema_data['version'],
json.dumps(schema_data['attr_names']))
req_json = await ledger.build_schema_request(self.did, schema_json)
await self._sign_submit(req_json)
for _ in range(16): # reasonable timeout
try:
rv_json = await self.get_schema(s_key) # adds to cache
break
except AbsentSchema:
await sleep(1)
LOGGER.info('Sent schema %s to ledger, waiting 1s for its appearance', s_id)
if not rv_json:
LOGGER.debug('Origin.send_schema <!< timed out waiting on sent schema %s', s_id)
raise BadLedgerTxn('Timed out waiting on sent schema {}'.format(s_id))
LOGGER.debug('Origin.send_schema <<< %s', rv_json)
return rv_json | <ast.AsyncFunctionDef object at 0x7da20c6a89a0> | keyword[async] keyword[def] identifier[send_schema] ( identifier[self] , identifier[schema_data_json] : identifier[str] )-> identifier[str] :
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[schema_data_json] )
identifier[schema_data] = identifier[json] . identifier[loads] ( identifier[schema_data_json] )
keyword[for] identifier[attr] keyword[in] identifier[schema_data] [ literal[string] ]:
keyword[if] keyword[not] ( identifier[re] . identifier[match] ( literal[string] , identifier[attr] )) keyword[or] identifier[attr] . identifier[strip] (). identifier[lower] ()== literal[string] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[attr] )
keyword[raise] identifier[BadAttribute] ( literal[string] . identifier[format] ( identifier[attr] ))
identifier[s_id] = identifier[schema_id] ( identifier[self] . identifier[did] , identifier[schema_data] [ literal[string] ], identifier[schema_data] [ literal[string] ])
identifier[s_key] = identifier[schema_key] ( identifier[s_id] )
identifier[rv_json] = keyword[None]
keyword[with] identifier[SCHEMA_CACHE] . identifier[lock] :
keyword[try] :
identifier[rv_json] = keyword[await] identifier[self] . identifier[get_schema] ( identifier[s_key] )
identifier[LOGGER] . identifier[error] (
literal[string] ,
identifier[schema_data] [ literal[string] ],
identifier[schema_data] [ literal[string] ],
identifier[self] . identifier[did] )
keyword[except] identifier[AbsentSchema] :
( identifier[_] , identifier[schema_json] )= keyword[await] identifier[anoncreds] . identifier[issuer_create_schema] (
identifier[self] . identifier[did] ,
identifier[schema_data] [ literal[string] ],
identifier[schema_data] [ literal[string] ],
identifier[json] . identifier[dumps] ( identifier[schema_data] [ literal[string] ]))
identifier[req_json] = keyword[await] identifier[ledger] . identifier[build_schema_request] ( identifier[self] . identifier[did] , identifier[schema_json] )
keyword[await] identifier[self] . identifier[_sign_submit] ( identifier[req_json] )
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] ):
keyword[try] :
identifier[rv_json] = keyword[await] identifier[self] . identifier[get_schema] ( identifier[s_key] )
keyword[break]
keyword[except] identifier[AbsentSchema] :
keyword[await] identifier[sleep] ( literal[int] )
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[s_id] )
keyword[if] keyword[not] identifier[rv_json] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[s_id] )
keyword[raise] identifier[BadLedgerTxn] ( literal[string] . identifier[format] ( identifier[s_id] ))
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rv_json] )
keyword[return] identifier[rv_json] | async def send_schema(self, schema_data_json: str) -> str:
"""
Send schema to ledger, then retrieve it as written to the ledger and return it.
        Raise BadLedgerTxn on failure. Raise BadAttribute for an attribute name that
        is malformed (disallowed characters, leading/trailing spaces or hyphens) or
        reserved by indy-sdk ('hash').
If schema already exists on ledger, log error and return schema.
:param schema_data_json: schema data json with name, version, attribute names; e.g.,
::
{
'name': 'my-schema',
'version': '1.234',
'attr_names': ['favourite_drink', 'height', 'last_visit_date']
}
:return: schema json as written to ledger (or existed a priori)
"""
LOGGER.debug('Origin.send_schema >>> schema_data_json: %s', schema_data_json)
schema_data = json.loads(schema_data_json)
for attr in schema_data['attr_names']:
if not re.match('(?=[^- ])[-_a-zA-Z0-9 ]+(?<=[^- ])$', attr) or attr.strip().lower() == 'hash':
LOGGER.debug('Origin.send_schema <!< Bad attribute name [%s]', attr)
raise BadAttribute('Bad attribute name [{}]'.format(attr)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']]
s_id = schema_id(self.did, schema_data['name'], schema_data['version'])
s_key = schema_key(s_id)
rv_json = None
with SCHEMA_CACHE.lock:
try:
rv_json = await self.get_schema(s_key)
LOGGER.error('Schema %s version %s already exists on ledger for origin-did %s: not sending', schema_data['name'], schema_data['version'], self.did) # depends on [control=['try'], data=[]]
except AbsentSchema: # OK - about to create and send it
(_, schema_json) = await anoncreds.issuer_create_schema(self.did, schema_data['name'], schema_data['version'], json.dumps(schema_data['attr_names']))
req_json = await ledger.build_schema_request(self.did, schema_json)
await self._sign_submit(req_json)
for _ in range(16): # reasonable timeout
try:
rv_json = await self.get_schema(s_key) # adds to cache
break # depends on [control=['try'], data=[]]
except AbsentSchema:
await sleep(1)
LOGGER.info('Sent schema %s to ledger, waiting 1s for its appearance', s_id) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
if not rv_json:
LOGGER.debug('Origin.send_schema <!< timed out waiting on sent schema %s', s_id)
raise BadLedgerTxn('Timed out waiting on sent schema {}'.format(s_id)) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['with'], data=[]]
LOGGER.debug('Origin.send_schema <<< %s', rv_json)
return rv_json |
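# The poll-until-visible idiom from the tail of send_schema, factored
# into a generic asyncio helper; absent_exc stands in for whatever
# "not there yet" exception the fetch coroutine raises.
import asyncio

async def wait_until_present(fetch, absent_exc, retries=16, delay=1.0):
    for _ in range(retries):
        try:
            return await fetch()
        except absent_exc:
            await asyncio.sleep(delay)
    raise TimeoutError('gave up after {} attempts'.format(retries))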
def load_hash_configuration(self, hash_name):
"""
Loads and returns hash configuration
"""
conf = self.mongo_object.find_one(
{'hash_conf_name': hash_name + '_conf'}
)
return pickle.loads(conf['hash_configuration']) if conf is not None\
else None | def function[load_hash_configuration, parameter[self, hash_name]]:
constant[
Loads and returns hash configuration
]
variable[conf] assign[=] call[name[self].mongo_object.find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0866380>], [<ast.BinOp object at 0x7da1b0864bb0>]]]]
return[<ast.IfExp object at 0x7da1b0866410>] | keyword[def] identifier[load_hash_configuration] ( identifier[self] , identifier[hash_name] ):
literal[string]
identifier[conf] = identifier[self] . identifier[mongo_object] . identifier[find_one] (
{ literal[string] : identifier[hash_name] + literal[string] }
)
keyword[return] identifier[pickle] . identifier[loads] ( identifier[conf] [ literal[string] ]) keyword[if] identifier[conf] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] | def load_hash_configuration(self, hash_name):
"""
Loads and returns hash configuration
"""
conf = self.mongo_object.find_one({'hash_conf_name': hash_name + '_conf'})
return pickle.loads(conf['hash_configuration']) if conf is not None else None |
def get_process_hardware_breakpoints(self, dwProcessId):
"""
@see: L{get_thread_hardware_breakpoints}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@rtype: list of tuple( int, L{HardwareBreakpoint} )
@return: All hardware breakpoints for each thread in the given process
as a list of tuples (tid, bp).
"""
result = list()
aProcess = self.system.get_process(dwProcessId)
for dwThreadId in aProcess.iter_thread_ids():
if dwThreadId in self.__hardwareBP:
bplist = self.__hardwareBP[dwThreadId]
for bp in bplist:
result.append( (dwThreadId, bp) )
return result | def function[get_process_hardware_breakpoints, parameter[self, dwProcessId]]:
constant[
@see: L{get_thread_hardware_breakpoints}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@rtype: list of tuple( int, L{HardwareBreakpoint} )
@return: All hardware breakpoints for each thread in the given process
as a list of tuples (tid, bp).
]
variable[result] assign[=] call[name[list], parameter[]]
variable[aProcess] assign[=] call[name[self].system.get_process, parameter[name[dwProcessId]]]
for taget[name[dwThreadId]] in starred[call[name[aProcess].iter_thread_ids, parameter[]]] begin[:]
if compare[name[dwThreadId] in name[self].__hardwareBP] begin[:]
variable[bplist] assign[=] call[name[self].__hardwareBP][name[dwThreadId]]
for taget[name[bp]] in starred[name[bplist]] begin[:]
call[name[result].append, parameter[tuple[[<ast.Name object at 0x7da1b06fad10>, <ast.Name object at 0x7da1b06fb040>]]]]
return[name[result]] | keyword[def] identifier[get_process_hardware_breakpoints] ( identifier[self] , identifier[dwProcessId] ):
literal[string]
identifier[result] = identifier[list] ()
identifier[aProcess] = identifier[self] . identifier[system] . identifier[get_process] ( identifier[dwProcessId] )
keyword[for] identifier[dwThreadId] keyword[in] identifier[aProcess] . identifier[iter_thread_ids] ():
keyword[if] identifier[dwThreadId] keyword[in] identifier[self] . identifier[__hardwareBP] :
identifier[bplist] = identifier[self] . identifier[__hardwareBP] [ identifier[dwThreadId] ]
keyword[for] identifier[bp] keyword[in] identifier[bplist] :
identifier[result] . identifier[append] (( identifier[dwThreadId] , identifier[bp] ))
keyword[return] identifier[result] | def get_process_hardware_breakpoints(self, dwProcessId):
"""
@see: L{get_thread_hardware_breakpoints}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@rtype: list of tuple( int, L{HardwareBreakpoint} )
@return: All hardware breakpoints for each thread in the given process
as a list of tuples (tid, bp).
"""
result = list()
aProcess = self.system.get_process(dwProcessId)
for dwThreadId in aProcess.iter_thread_ids():
if dwThreadId in self.__hardwareBP:
bplist = self.__hardwareBP[dwThreadId]
for bp in bplist:
result.append((dwThreadId, bp)) # depends on [control=['for'], data=['bp']] # depends on [control=['if'], data=['dwThreadId']] # depends on [control=['for'], data=['dwThreadId']]
return result |
def protein_ids(self, contig=None, strand=None):
"""
        Return all protein IDs
        (optionally restricted to a given chromosome and/or strand).
"""
protein_ids = self._all_feature_values(
column="protein_id",
feature="CDS",
contig=contig,
strand=strand,
distinct=True)
# drop None values
return [protein_id for protein_id in protein_ids if protein_id] | def function[protein_ids, parameter[self, contig, strand]]:
constant[
        Return all protein IDs
        (optionally restricted to a given chromosome and/or strand).
]
variable[protein_ids] assign[=] call[name[self]._all_feature_values, parameter[]]
return[<ast.ListComp object at 0x7da1b07175e0>] | keyword[def] identifier[protein_ids] ( identifier[self] , identifier[contig] = keyword[None] , identifier[strand] = keyword[None] ):
literal[string]
identifier[protein_ids] = identifier[self] . identifier[_all_feature_values] (
identifier[column] = literal[string] ,
identifier[feature] = literal[string] ,
identifier[contig] = identifier[contig] ,
identifier[strand] = identifier[strand] ,
identifier[distinct] = keyword[True] )
keyword[return] [ identifier[protein_id] keyword[for] identifier[protein_id] keyword[in] identifier[protein_ids] keyword[if] identifier[protein_id] ] | def protein_ids(self, contig=None, strand=None):
"""
        Return all protein IDs
        (optionally restricted to a given chromosome and/or strand).
"""
protein_ids = self._all_feature_values(column='protein_id', feature='CDS', contig=contig, strand=strand, distinct=True)
# drop None values
return [protein_id for protein_id in protein_ids if protein_id] |
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: %s"
% str(uniques)) | def function[check_consistent_length, parameter[]]:
constant[Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
]
variable[uniques] assign[=] call[name[np].unique, parameter[<ast.ListComp object at 0x7da1b004dd50>]]
if compare[call[name[len], parameter[name[uniques]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1afe0f280> | keyword[def] identifier[check_consistent_length] (* identifier[arrays] ):
literal[string]
identifier[uniques] = identifier[np] . identifier[unique] ([ identifier[_num_samples] ( identifier[X] ) keyword[for] identifier[X] keyword[in] identifier[arrays] keyword[if] identifier[X] keyword[is] keyword[not] keyword[None] ])
keyword[if] identifier[len] ( identifier[uniques] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[str] ( identifier[uniques] )) | def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError('Found arrays with inconsistent numbers of samples: %s' % str(uniques)) # depends on [control=['if'], data=[]] |
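# Usage sketch, assuming _num_samples returns len(x) or x.shape[0]
# as in scikit-learn; None entries are skipped by the comprehension.
import numpy as np

check_consistent_length(np.zeros((3, 2)), [0, 1, 2], None)  # passes: 3 == 3
try:
    check_consistent_length(np.zeros((3, 2)), [0, 1])
except ValueError as e:
    print(e)  # Found arrays with inconsistent numbers of samples: [2 3]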
def convert_iris(directory, output_directory, output_filename='iris.hdf5'):
"""Convert the Iris dataset to HDF5.
Converts the Iris dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.Iris`. The converted dataset is
saved as 'iris.hdf5'.
This method assumes the existence of the file `iris.data`.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
        Name of the saved dataset. Defaults to 'iris.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
classes = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
data = numpy.loadtxt(
os.path.join(directory, 'iris.data'),
converters={4: lambda x: classes[x]},
delimiter=',')
features = data[:, :-1].astype('float32')
targets = data[:, -1].astype('uint8').reshape((-1, 1))
data = (('all', 'features', features),
('all', 'targets', targets))
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'feature'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,) | def function[convert_iris, parameter[directory, output_directory, output_filename]]:
constant[Convert the Iris dataset to HDF5.
Converts the Iris dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.Iris`. The converted dataset is
saved as 'iris.hdf5'.
This method assumes the existence of the file `iris.data`.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
        Name of the saved dataset. Defaults to 'iris.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
]
variable[classes] assign[=] dictionary[[<ast.Constant object at 0x7da207f004c0>, <ast.Constant object at 0x7da207f026e0>, <ast.Constant object at 0x7da207f017e0>], [<ast.Constant object at 0x7da207f02bf0>, <ast.Constant object at 0x7da207f03820>, <ast.Constant object at 0x7da207f030a0>]]
variable[data] assign[=] call[name[numpy].loadtxt, parameter[call[name[os].path.join, parameter[name[directory], constant[iris.data]]]]]
variable[features] assign[=] call[call[name[data]][tuple[[<ast.Slice object at 0x7da20e9b01f0>, <ast.Slice object at 0x7da20e9b19c0>]]].astype, parameter[constant[float32]]]
variable[targets] assign[=] call[call[call[name[data]][tuple[[<ast.Slice object at 0x7da20e9b3eb0>, <ast.UnaryOp object at 0x7da1b2345390>]]].astype, parameter[constant[uint8]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b2344220>, <ast.Constant object at 0x7da1b2344f10>]]]]
variable[data] assign[=] tuple[[<ast.Tuple object at 0x7da1b23446d0>, <ast.Tuple object at 0x7da1b2345570>]]
variable[output_path] assign[=] call[name[os].path.join, parameter[name[output_directory], name[output_filename]]]
variable[h5file] assign[=] call[name[h5py].File, parameter[name[output_path]]]
call[name[fill_hdf5_file], parameter[name[h5file], name[data]]]
call[call[name[h5file]][constant[features]].dims][constant[0]].label assign[=] constant[batch]
call[call[name[h5file]][constant[features]].dims][constant[1]].label assign[=] constant[feature]
call[call[name[h5file]][constant[targets]].dims][constant[0]].label assign[=] constant[batch]
call[call[name[h5file]][constant[targets]].dims][constant[1]].label assign[=] constant[index]
call[name[h5file].flush, parameter[]]
call[name[h5file].close, parameter[]]
return[tuple[[<ast.Name object at 0x7da20c9934f0>]]] | keyword[def] identifier[convert_iris] ( identifier[directory] , identifier[output_directory] , identifier[output_filename] = literal[string] ):
literal[string]
identifier[classes] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
identifier[data] = identifier[numpy] . identifier[loadtxt] (
identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] ),
identifier[converters] ={ literal[int] : keyword[lambda] identifier[x] : identifier[classes] [ identifier[x] ]},
identifier[delimiter] = literal[string] )
identifier[features] = identifier[data] [:,:- literal[int] ]. identifier[astype] ( literal[string] )
identifier[targets] = identifier[data] [:,- literal[int] ]. identifier[astype] ( literal[string] ). identifier[reshape] ((- literal[int] , literal[int] ))
identifier[data] =(( literal[string] , literal[string] , identifier[features] ),
( literal[string] , literal[string] , identifier[targets] ))
identifier[output_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_directory] , identifier[output_filename] )
identifier[h5file] = identifier[h5py] . identifier[File] ( identifier[output_path] , identifier[mode] = literal[string] )
identifier[fill_hdf5_file] ( identifier[h5file] , identifier[data] )
identifier[h5file] [ literal[string] ]. identifier[dims] [ literal[int] ]. identifier[label] = literal[string]
identifier[h5file] [ literal[string] ]. identifier[dims] [ literal[int] ]. identifier[label] = literal[string]
identifier[h5file] [ literal[string] ]. identifier[dims] [ literal[int] ]. identifier[label] = literal[string]
identifier[h5file] [ literal[string] ]. identifier[dims] [ literal[int] ]. identifier[label] = literal[string]
identifier[h5file] . identifier[flush] ()
identifier[h5file] . identifier[close] ()
keyword[return] ( identifier[output_path] ,) | def convert_iris(directory, output_directory, output_filename='iris.hdf5'):
"""Convert the Iris dataset to HDF5.
Converts the Iris dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.Iris`. The converted dataset is
saved as 'iris.hdf5'.
This method assumes the existence of the file `iris.data`.
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
        Name of the saved dataset. Defaults to 'iris.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
classes = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
data = numpy.loadtxt(os.path.join(directory, 'iris.data'), converters={4: lambda x: classes[x]}, delimiter=',')
features = data[:, :-1].astype('float32')
targets = data[:, -1].astype('uint8').reshape((-1, 1))
data = (('all', 'features', features), ('all', 'targets', targets))
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'feature'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,) |
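# Reading the converted file back; the shapes follow from the code
# above (150 iris rows, 4 float32 features, 1 uint8 target column).
import h5py

with h5py.File('iris.hdf5', mode='r') as f:
    print(f['features'].shape, f['features'].dtype)  # (150, 4) float32
    print(f['targets'].shape, f['targets'].dtype)    # (150, 1) uint8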
def draw_strokes(stroke_based_drawings):
"""
Visualizes drawings (ground truth or predictions) by
returning images to represent the stroke-based data from
the user.
Parameters
----------
stroke_based_drawings: SArray or list
An `SArray` of type `list`. Each element in the SArray
should be a list of strokes, where each stroke is a list
of points, and each point is represented as a dictionary
with two keys, "x" and "y". A single stroke-based drawing
is also supported, in which case, the type of the input
would be list.
Returns
-------
drawings: SArray or _tc.Image
Each stroke-based drawing is converted into a 28x28
grayscale drawing for the user to visualize what their
strokes traced.
"""
single_input = False
if (not isinstance(stroke_based_drawings, _tc.SArray)
and not isinstance(stroke_based_drawings, list)):
raise _ToolkitError("Input to draw_strokes must be of type "
+ "turicreate.SArray or list (for a single stroke-based drawing)")
if (isinstance(stroke_based_drawings, _tc.SArray)
and stroke_based_drawings.dtype != list):
raise _ToolkitError("SArray input to draw_strokes must have dtype "
+ "list. Each element in the SArray should be a list of strokes, "
+ "where each stroke is a list of points, "
+ "and each point is represented as a dictionary "
+ "with two keys, \"x\" and \"y\".")
if isinstance(stroke_based_drawings, list):
single_input = True
stroke_based_drawings = _tc.SArray([stroke_based_drawings])
sf = _tc.SFrame({"drawings": stroke_based_drawings})
sf_with_drawings = _extensions._drawing_classifier_prepare_data(
sf, "drawings")
if single_input:
return sf_with_drawings["drawings"][0]
return sf_with_drawings["drawings"] | def function[draw_strokes, parameter[stroke_based_drawings]]:
constant[
Visualizes drawings (ground truth or predictions) by
returning images to represent the stroke-based data from
the user.
Parameters
----------
stroke_based_drawings: SArray or list
An `SArray` of type `list`. Each element in the SArray
should be a list of strokes, where each stroke is a list
of points, and each point is represented as a dictionary
with two keys, "x" and "y". A single stroke-based drawing
is also supported, in which case, the type of the input
would be list.
Returns
-------
drawings: SArray or _tc.Image
Each stroke-based drawing is converted into a 28x28
grayscale drawing for the user to visualize what their
strokes traced.
]
variable[single_input] assign[=] constant[False]
if <ast.BoolOp object at 0x7da2049602e0> begin[:]
<ast.Raise object at 0x7da2049638e0>
if <ast.BoolOp object at 0x7da204962d70> begin[:]
<ast.Raise object at 0x7da204960790>
if call[name[isinstance], parameter[name[stroke_based_drawings], name[list]]] begin[:]
variable[single_input] assign[=] constant[True]
variable[stroke_based_drawings] assign[=] call[name[_tc].SArray, parameter[list[[<ast.Name object at 0x7da204961150>]]]]
variable[sf] assign[=] call[name[_tc].SFrame, parameter[dictionary[[<ast.Constant object at 0x7da2049607c0>], [<ast.Name object at 0x7da204963940>]]]]
variable[sf_with_drawings] assign[=] call[name[_extensions]._drawing_classifier_prepare_data, parameter[name[sf], constant[drawings]]]
if name[single_input] begin[:]
return[call[call[name[sf_with_drawings]][constant[drawings]]][constant[0]]]
return[call[name[sf_with_drawings]][constant[drawings]]] | keyword[def] identifier[draw_strokes] ( identifier[stroke_based_drawings] ):
literal[string]
identifier[single_input] = keyword[False]
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[stroke_based_drawings] , identifier[_tc] . identifier[SArray] )
keyword[and] keyword[not] identifier[isinstance] ( identifier[stroke_based_drawings] , identifier[list] )):
keyword[raise] identifier[_ToolkitError] ( literal[string]
+ literal[string] )
keyword[if] ( identifier[isinstance] ( identifier[stroke_based_drawings] , identifier[_tc] . identifier[SArray] )
keyword[and] identifier[stroke_based_drawings] . identifier[dtype] != identifier[list] ):
keyword[raise] identifier[_ToolkitError] ( literal[string]
+ literal[string]
+ literal[string]
+ literal[string]
+ literal[string] )
keyword[if] identifier[isinstance] ( identifier[stroke_based_drawings] , identifier[list] ):
identifier[single_input] = keyword[True]
identifier[stroke_based_drawings] = identifier[_tc] . identifier[SArray] ([ identifier[stroke_based_drawings] ])
identifier[sf] = identifier[_tc] . identifier[SFrame] ({ literal[string] : identifier[stroke_based_drawings] })
identifier[sf_with_drawings] = identifier[_extensions] . identifier[_drawing_classifier_prepare_data] (
identifier[sf] , literal[string] )
keyword[if] identifier[single_input] :
keyword[return] identifier[sf_with_drawings] [ literal[string] ][ literal[int] ]
keyword[return] identifier[sf_with_drawings] [ literal[string] ] | def draw_strokes(stroke_based_drawings):
"""
Visualizes drawings (ground truth or predictions) by
returning images to represent the stroke-based data from
the user.
Parameters
----------
stroke_based_drawings: SArray or list
An `SArray` of type `list`. Each element in the SArray
should be a list of strokes, where each stroke is a list
of points, and each point is represented as a dictionary
with two keys, "x" and "y". A single stroke-based drawing
is also supported, in which case, the type of the input
would be list.
Returns
-------
drawings: SArray or _tc.Image
Each stroke-based drawing is converted into a 28x28
grayscale drawing for the user to visualize what their
strokes traced.
"""
single_input = False
if not isinstance(stroke_based_drawings, _tc.SArray) and (not isinstance(stroke_based_drawings, list)):
raise _ToolkitError('Input to draw_strokes must be of type ' + 'turicreate.SArray or list (for a single stroke-based drawing)') # depends on [control=['if'], data=[]]
if isinstance(stroke_based_drawings, _tc.SArray) and stroke_based_drawings.dtype != list:
raise _ToolkitError('SArray input to draw_strokes must have dtype ' + 'list. Each element in the SArray should be a list of strokes, ' + 'where each stroke is a list of points, ' + 'and each point is represented as a dictionary ' + 'with two keys, "x" and "y".') # depends on [control=['if'], data=[]]
if isinstance(stroke_based_drawings, list):
single_input = True
stroke_based_drawings = _tc.SArray([stroke_based_drawings]) # depends on [control=['if'], data=[]]
sf = _tc.SFrame({'drawings': stroke_based_drawings})
sf_with_drawings = _extensions._drawing_classifier_prepare_data(sf, 'drawings')
if single_input:
return sf_with_drawings['drawings'][0] # depends on [control=['if'], data=[]]
return sf_with_drawings['drawings'] |
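# Building input in the documented format: a list of strokes, each a
# list of {'x': ..., 'y': ...} points. Requires turicreate at runtime;
# the coordinates here are arbitrary illustration values.
import turicreate as _tc

one_drawing = [
    [{'x': 0, 'y': 0}, {'x': 20, 'y': 20}],  # first stroke
    [{'x': 0, 'y': 20}, {'x': 20, 'y': 0}],  # second stroke
]
image = draw_strokes(one_drawing)                     # -> a 28x28 _tc.Image
images = draw_strokes(_tc.SArray([one_drawing] * 2))  # -> SArray of images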
def reload_extension(self, name):
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed. This is
equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension`
except done in an atomic way. That is, if an operation fails mid-reload then
the bot will roll-back to the prior working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension setup function had an execution error.
"""
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name)
# get the previous module states from sys modules
modules = {
name: module
for name, module in sys.modules.items()
if _is_submodule(lib.__name__, name)
}
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name)
except Exception as e:
# if the load failed, the remnants should have been
# cleaned from the load_extension function call
# so let's load it from our old compiled library.
self._load_from_module_spec(lib, name)
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise | def function[reload_extension, parameter[self, name]]:
constant[Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed. This is
equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension`
except done in an atomic way. That is, if an operation fails mid-reload then
the bot will roll-back to the prior working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension setup function had an execution error.
]
variable[lib] assign[=] call[name[self].__extensions.get, parameter[name[name]]]
if compare[name[lib] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1ff0b20>
variable[modules] assign[=] <ast.DictComp object at 0x7da1b1ff0430>
<ast.Try object at 0x7da1b1ff0970> | keyword[def] identifier[reload_extension] ( identifier[self] , identifier[name] ):
literal[string]
identifier[lib] = identifier[self] . identifier[__extensions] . identifier[get] ( identifier[name] )
keyword[if] identifier[lib] keyword[is] keyword[None] :
keyword[raise] identifier[errors] . identifier[ExtensionNotLoaded] ( identifier[name] )
identifier[modules] ={
identifier[name] : identifier[module]
keyword[for] identifier[name] , identifier[module] keyword[in] identifier[sys] . identifier[modules] . identifier[items] ()
keyword[if] identifier[_is_submodule] ( identifier[lib] . identifier[__name__] , identifier[name] )
}
keyword[try] :
identifier[self] . identifier[_remove_module_references] ( identifier[lib] . identifier[__name__] )
identifier[self] . identifier[_call_module_finalizers] ( identifier[lib] , identifier[name] )
identifier[self] . identifier[load_extension] ( identifier[name] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[_load_from_module_spec] ( identifier[lib] , identifier[name] )
identifier[sys] . identifier[modules] . identifier[update] ( identifier[modules] )
keyword[raise] | def reload_extension(self, name):
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed. This is
equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension`
except done in an atomic way. That is, if an operation fails mid-reload then
the bot will roll-back to the prior working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension setup function had an execution error.
"""
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name) # depends on [control=['if'], data=[]]
# get the previous module states from sys modules
modules = {name: module for (name, module) in sys.modules.items() if _is_submodule(lib.__name__, name)}
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name) # depends on [control=['try'], data=[]]
except Exception as e:
# if the load failed, the remnants should have been
# cleaned from the load_extension function call
# so let's load it from our old compiled library.
self._load_from_module_spec(lib, name)
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise # depends on [control=['except'], data=[]] |
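The method in the row above matches discord.py's `commands.Bot.reload_extension`; a minimal sketch of wiring it to an owner-only command, where the extension name `cogs.music` is a hypothetical example:

from discord.ext import commands

bot = commands.Bot(command_prefix="!")

@bot.command(hidden=True)
@commands.is_owner()
async def reload(ctx, name: str):
    # Atomic reload: on failure the previously loaded state is restored.
    try:
        bot.reload_extension(name)  # e.g. "cogs.music"
        await ctx.send("Reloaded {}".format(name))
    except commands.ExtensionNotLoaded:
        await ctx.send("{} is not loaded".format(name))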
def enable_precompute(panel):
"""Schedule a precompute task for `panel`"""
use_metis = panel['data_source']['source_type'] == 'querybuilder'
if use_metis:
query = panel['data_source']['query']
else:
query = "u'''%s'''" % panel['data_source']['code']
precompute = panel['data_source']['precompute']
timeframe = panel['data_source']['timeframe']
bucket_width = precompute['bucket_width']['value']
time_scale = precompute['bucket_width']['scale']['name']
bucket_width_seconds = get_seconds(bucket_width, time_scale)
if timeframe['mode']['value'] == 'recent':
untrusted_time = precompute['untrusted_time']['value']
untrusted_time_scale = precompute['untrusted_time']['scale']['name']
untrusted_time_seconds = get_seconds(untrusted_time, untrusted_time_scale)
# Schedule the task with an interval equal to the bucket_width
interval = bucket_width_seconds
elif timeframe['mode']['value'] == 'range':
untrusted_time_seconds = 0
# Schedule the task with an interval of 0 so it only runs once
interval = 0
task_code = PRECOMPUTE_INITIALIZATION_CODE % (query, timeframe,
bucket_width_seconds,
untrusted_time_seconds,
use_metis)
result = scheduler_client.schedule(task_code, interval)
if result['status'] != 'success':
raise RuntimeError(result.get('reason'))
return result['id'] | def function[enable_precompute, parameter[panel]]:
constant[Schedule a precompute task for `panel`]
variable[use_metis] assign[=] compare[call[call[name[panel]][constant[data_source]]][constant[source_type]] equal[==] constant[querybuilder]]
if name[use_metis] begin[:]
variable[query] assign[=] call[call[name[panel]][constant[data_source]]][constant[query]]
variable[precompute] assign[=] call[call[name[panel]][constant[data_source]]][constant[precompute]]
variable[timeframe] assign[=] call[call[name[panel]][constant[data_source]]][constant[timeframe]]
variable[bucket_width] assign[=] call[call[name[precompute]][constant[bucket_width]]][constant[value]]
variable[time_scale] assign[=] call[call[call[name[precompute]][constant[bucket_width]]][constant[scale]]][constant[name]]
variable[bucket_width_seconds] assign[=] call[name[get_seconds], parameter[name[bucket_width], name[time_scale]]]
if compare[call[call[name[timeframe]][constant[mode]]][constant[value]] equal[==] constant[recent]] begin[:]
variable[untrusted_time] assign[=] call[call[name[precompute]][constant[untrusted_time]]][constant[value]]
variable[untrusted_time_scale] assign[=] call[call[call[name[precompute]][constant[untrusted_time]]][constant[scale]]][constant[name]]
variable[untrusted_time_seconds] assign[=] call[name[get_seconds], parameter[name[untrusted_time], name[untrusted_time_scale]]]
variable[interval] assign[=] name[bucket_width_seconds]
variable[task_code] assign[=] binary_operation[name[PRECOMPUTE_INITIALIZATION_CODE] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204620b50>, <ast.Name object at 0x7da2046228c0>, <ast.Name object at 0x7da204622dd0>, <ast.Name object at 0x7da204620bb0>, <ast.Name object at 0x7da204622a10>]]]
variable[result] assign[=] call[name[scheduler_client].schedule, parameter[name[task_code], name[interval]]]
if compare[call[name[result]][constant[status]] not_equal[!=] constant[success]] begin[:]
<ast.Raise object at 0x7da20c6c6500>
return[call[name[result]][constant[id]]] | keyword[def] identifier[enable_precompute] ( identifier[panel] ):
literal[string]
identifier[use_metis] = identifier[panel] [ literal[string] ][ literal[string] ]== literal[string]
keyword[if] identifier[use_metis] :
identifier[query] = identifier[panel] [ literal[string] ][ literal[string] ]
keyword[else] :
identifier[query] = literal[string] % identifier[panel] [ literal[string] ][ literal[string] ]
identifier[precompute] = identifier[panel] [ literal[string] ][ literal[string] ]
identifier[timeframe] = identifier[panel] [ literal[string] ][ literal[string] ]
identifier[bucket_width] = identifier[precompute] [ literal[string] ][ literal[string] ]
identifier[time_scale] = identifier[precompute] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[bucket_width_seconds] = identifier[get_seconds] ( identifier[bucket_width] , identifier[time_scale] )
keyword[if] identifier[timeframe] [ literal[string] ][ literal[string] ]== literal[string] :
identifier[untrusted_time] = identifier[precompute] [ literal[string] ][ literal[string] ]
identifier[untrusted_time_scale] = identifier[precompute] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[untrusted_time_seconds] = identifier[get_seconds] ( identifier[untrusted_time] , identifier[untrusted_time_scale] )
identifier[interval] = identifier[bucket_width_seconds]
keyword[elif] identifier[timeframe] [ literal[string] ][ literal[string] ]== literal[string] :
identifier[untrusted_time_seconds] = literal[int]
identifier[interval] = literal[int]
identifier[task_code] = identifier[PRECOMPUTE_INITIALIZATION_CODE] %( identifier[query] , identifier[timeframe] ,
identifier[bucket_width_seconds] ,
identifier[untrusted_time_seconds] ,
identifier[use_metis] )
identifier[result] = identifier[scheduler_client] . identifier[schedule] ( identifier[task_code] , identifier[interval] )
keyword[if] identifier[result] [ literal[string] ]!= literal[string] :
keyword[raise] identifier[RuntimeError] ( identifier[result] . identifier[get] ( literal[string] ))
keyword[return] identifier[result] [ literal[string] ] | def enable_precompute(panel):
"""Schedule a precompute task for `panel`"""
use_metis = panel['data_source']['source_type'] == 'querybuilder'
if use_metis:
query = panel['data_source']['query'] # depends on [control=['if'], data=[]]
else:
query = "u'''%s'''" % panel['data_source']['code']
precompute = panel['data_source']['precompute']
timeframe = panel['data_source']['timeframe']
bucket_width = precompute['bucket_width']['value']
time_scale = precompute['bucket_width']['scale']['name']
bucket_width_seconds = get_seconds(bucket_width, time_scale)
if timeframe['mode']['value'] == 'recent':
untrusted_time = precompute['untrusted_time']['value']
untrusted_time_scale = precompute['untrusted_time']['scale']['name']
untrusted_time_seconds = get_seconds(untrusted_time, untrusted_time_scale)
# Schedule the task with an interval equal to the bucket_width
interval = bucket_width_seconds # depends on [control=['if'], data=[]]
elif timeframe['mode']['value'] == 'range':
untrusted_time_seconds = 0
# Schedule the task with an interval of 0 so it only runs once
interval = 0 # depends on [control=['if'], data=[]]
task_code = PRECOMPUTE_INITIALIZATION_CODE % (query, timeframe, bucket_width_seconds, untrusted_time_seconds, use_metis)
result = scheduler_client.schedule(task_code, interval)
if result['status'] != 'success':
raise RuntimeError(result.get('reason')) # depends on [control=['if'], data=[]]
return result['id'] |
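A sketch of the panel payload the function in the row above expects; every key is inferred from the lookups in the code, while the concrete values and the querybuilder query shape are hypothetical:

panel = {
    'data_source': {
        'source_type': 'querybuilder',   # anything else requires a 'code' key
        'query': {'stream': 'events'},   # hypothetical querybuilder payload
        'precompute': {
            'bucket_width': {'value': 5, 'scale': {'name': 'minutes'}},
            'untrusted_time': {'value': 1, 'scale': {'name': 'hours'}},
        },
        'timeframe': {'mode': {'value': 'recent'}},
    }
}

# Schedules the task every bucket_width (5 minutes here) and returns the
# scheduler task id; raises RuntimeError if the scheduler reports failure.
task_id = enable_precompute(panel)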
def get(self, name: str, default: Any = None) -> Any:
"""Return the first value, either the default or actual"""
return super().get(name, [default])[0] | def function[get, parameter[self, name, default]]:
constant[Return the first value, either the default or actual]
return[call[call[call[name[super], parameter[]].get, parameter[name[name], list[[<ast.Name object at 0x7da1b1616e00>]]]]][constant[0]]] | keyword[def] identifier[get] ( identifier[self] , identifier[name] : identifier[str] , identifier[default] : identifier[Any] = keyword[None] )-> identifier[Any] :
literal[string]
keyword[return] identifier[super] (). identifier[get] ( identifier[name] ,[ identifier[default] ])[ literal[int] ] | def get(self, name: str, default: Any=None) -> Any:
"""Return the first value, either the default or actual"""
return super().get(name, [default])[0] |
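The override in the row above only makes sense on a mapping whose stored values are lists; a self-contained sketch of that contract, with the class name invented here:

from typing import Any

class MultiValueDict(dict):
    """Hypothetical parent contract: every stored value is a list."""
    def get(self, name: str, default: Any = None) -> Any:
        """Return the first value, either the default or actual"""
        return super().get(name, [default])[0]

d = MultiValueDict(tag=["a", "b"])
assert d.get("tag") == "a"            # first stored value
assert d.get("missing", "x") == "x"   # default is wrapped, then unwrapped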
def _build_flags(self):
"""
Function builds kwargs variable for run_window
"""
# Check if all entries for selected arguments are nonempty
for arg_dict in [x for x in self.args.values() if self.arg_is_selected(x)]:
if 'entry' in arg_dict and not arg_dict['entry'].get_text():
self.gui_helper.execute_dialog("Entry {0} is empty".format(arg_dict['label']))
return False
# Check for active CheckButtons
for arg_dict in [x for x in self.args.values() if self.arg_is_selected(x)]:
arg_name = arg_dict['arg'].get_dest()
if 'entry' in arg_dict:
self.kwargs[arg_name] = arg_dict['entry'].get_text()
else:
if arg_dict['arg'].get_gui_hint('type') == 'const':
self.kwargs[arg_name] = arg_dict['arg'].kwargs['const']
else:
self.kwargs[arg_name] = True
# Check for non active CheckButtons but with defaults flag
for arg_dict in [x for x in self.args.values() if not self.arg_is_selected(x)]:
arg_name = arg_dict['arg'].get_dest()
if 'default' in arg_dict['arg'].kwargs:
self.kwargs[arg_name] = arg_dict['arg'].get_gui_hint('default')
elif arg_name in self.kwargs:
del self.kwargs[arg_name]
return True | def function[_build_flags, parameter[self]]:
constant[
Function builds kwargs variable for run_window
]
for taget[name[arg_dict]] in starred[<ast.ListComp object at 0x7da1b0fae7a0>] begin[:]
if <ast.BoolOp object at 0x7da1b0fae620> begin[:]
call[name[self].gui_helper.execute_dialog, parameter[call[constant[Entry {0} is empty].format, parameter[call[name[arg_dict]][constant[label]]]]]]
return[constant[False]]
for taget[name[arg_dict]] in starred[<ast.ListComp object at 0x7da1b0fadab0>] begin[:]
variable[arg_name] assign[=] call[call[name[arg_dict]][constant[arg]].get_dest, parameter[]]
if compare[constant[entry] in name[arg_dict]] begin[:]
call[name[self].kwargs][name[arg_name]] assign[=] call[call[name[arg_dict]][constant[entry]].get_text, parameter[]]
for taget[name[arg_dict]] in starred[<ast.ListComp object at 0x7da1b0faf340>] begin[:]
variable[arg_name] assign[=] call[call[name[arg_dict]][constant[arg]].get_dest, parameter[]]
if compare[constant[default] in call[name[arg_dict]][constant[arg]].kwargs] begin[:]
call[name[self].kwargs][name[arg_name]] assign[=] call[call[name[arg_dict]][constant[arg]].get_gui_hint, parameter[constant[default]]]
return[constant[True]] | keyword[def] identifier[_build_flags] ( identifier[self] ):
literal[string]
keyword[for] identifier[arg_dict] keyword[in] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[args] . identifier[values] () keyword[if] identifier[self] . identifier[arg_is_selected] ( identifier[x] )]:
keyword[if] literal[string] keyword[in] identifier[arg_dict] keyword[and] keyword[not] identifier[arg_dict] [ literal[string] ]. identifier[get_text] ():
identifier[self] . identifier[gui_helper] . identifier[execute_dialog] ( literal[string] . identifier[format] ( identifier[arg_dict] [ literal[string] ]))
keyword[return] keyword[False]
keyword[for] identifier[arg_dict] keyword[in] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[args] . identifier[values] () keyword[if] identifier[self] . identifier[arg_is_selected] ( identifier[x] )]:
identifier[arg_name] = identifier[arg_dict] [ literal[string] ]. identifier[get_dest] ()
keyword[if] literal[string] keyword[in] identifier[arg_dict] :
identifier[self] . identifier[kwargs] [ identifier[arg_name] ]= identifier[arg_dict] [ literal[string] ]. identifier[get_text] ()
keyword[else] :
keyword[if] identifier[arg_dict] [ literal[string] ]. identifier[get_gui_hint] ( literal[string] )== literal[string] :
identifier[self] . identifier[kwargs] [ identifier[arg_name] ]= identifier[arg_dict] [ literal[string] ]. identifier[kwargs] [ literal[string] ]
keyword[else] :
identifier[self] . identifier[kwargs] [ identifier[arg_name] ]= keyword[True]
keyword[for] identifier[arg_dict] keyword[in] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[args] . identifier[values] () keyword[if] keyword[not] identifier[self] . identifier[arg_is_selected] ( identifier[x] )]:
identifier[arg_name] = identifier[arg_dict] [ literal[string] ]. identifier[get_dest] ()
keyword[if] literal[string] keyword[in] identifier[arg_dict] [ literal[string] ]. identifier[kwargs] :
identifier[self] . identifier[kwargs] [ identifier[arg_name] ]= identifier[arg_dict] [ literal[string] ]. identifier[get_gui_hint] ( literal[string] )
keyword[elif] identifier[arg_name] keyword[in] identifier[self] . identifier[kwargs] :
keyword[del] identifier[self] . identifier[kwargs] [ identifier[arg_name] ]
keyword[return] keyword[True] | def _build_flags(self):
"""
Function builds kwargs variable for run_window
"""
# Check if all entries for selected arguments are nonempty
for arg_dict in [x for x in self.args.values() if self.arg_is_selected(x)]:
if 'entry' in arg_dict and (not arg_dict['entry'].get_text()):
self.gui_helper.execute_dialog('Entry {0} is empty'.format(arg_dict['label']))
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arg_dict']]
# Check for active CheckButtons
for arg_dict in [x for x in self.args.values() if self.arg_is_selected(x)]:
arg_name = arg_dict['arg'].get_dest()
if 'entry' in arg_dict:
self.kwargs[arg_name] = arg_dict['entry'].get_text() # depends on [control=['if'], data=['arg_dict']]
elif arg_dict['arg'].get_gui_hint('type') == 'const':
self.kwargs[arg_name] = arg_dict['arg'].kwargs['const'] # depends on [control=['if'], data=[]]
else:
self.kwargs[arg_name] = True # depends on [control=['for'], data=['arg_dict']]
# Check for non active CheckButtons but with defaults flag
for arg_dict in [x for x in self.args.values() if not self.arg_is_selected(x)]:
arg_name = arg_dict['arg'].get_dest()
if 'default' in arg_dict['arg'].kwargs:
self.kwargs[arg_name] = arg_dict['arg'].get_gui_hint('default') # depends on [control=['if'], data=[]]
elif arg_name in self.kwargs:
del self.kwargs[arg_name] # depends on [control=['if'], data=['arg_name']] # depends on [control=['for'], data=['arg_dict']]
return True |
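A sketch of the per-argument records `_build_flags` iterates over; the method and hint names come from the calls in the row above, everything else is hypothetical:

class FakeEntry:
    def get_text(self):
        return "myproject"

class FakeArg:
    """Stand-in for the devassistant-style argument object."""
    def __init__(self, dest, **kwargs):
        self._dest = dest
        self.kwargs = kwargs              # may carry 'default' or 'const'
    def get_dest(self):
        return self._dest
    def get_gui_hint(self, key):
        return self.kwargs.get(key)       # e.g. 'type' -> 'const'

# One entry of self.args: a label, an optional entry widget, and the
# argument object; selected text entries land in kwargs[dest].
args = {'name': {'label': 'Project name', 'entry': FakeEntry(),
                 'arg': FakeArg('name')}}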
def add_jump(self, name, min, max, num, warp=None, var_type=float):
""" An integer/float-valued enumerable with `num` items, bounded
between [`min`, `max`]. Note that the right endpoint of the interval
        includes `max`. This is a wrapper around add_enum; the enumerated
        values can be floats or ints, controlled by `var_type`.
"""
if not isinstance(var_type, type):
if var_type == 'int':
var_type = int
elif var_type == 'float':
var_type = float
else:
raise ValueError('var_type (%s) is not supported. use '
'"int" or "float",' % (var_type))
min, max = map(var_type, (min, max))
num = int(num)
if not warp:
choices = np.linspace(min, max, num=num, dtype=var_type)
    elif (min > 0) and warp == 'log':
        choices = np.logspace(np.log10(min), np.log10(max), num=num,
                              dtype=var_type)
    elif (min <= 0) and warp == 'log':
        raise ValueError('variable %s: log-warping requires min > 0'
                         % name)
else:
raise ValueError('variable %s: warp=%s is not supported. use '
'None or "log",' % (name, warp))
self.variables[name] = EnumVariable(name, choices.tolist()) | def function[add_jump, parameter[self, name, min, max, num, warp, var_type]]:
constant[ An integer/float-valued enumerable with `num` items, bounded
between [`min`, `max`]. Note that the right endpoint of the interval
includes `max`. This is a wrapper around add_enum; the enumerated
values can be floats or ints, controlled by `var_type`.
]
if <ast.UnaryOp object at 0x7da1b00f7c40> begin[:]
if compare[name[var_type] equal[==] constant[int]] begin[:]
variable[var_type] assign[=] name[int]
<ast.Tuple object at 0x7da1b00f6080> assign[=] call[name[map], parameter[name[var_type], tuple[[<ast.Name object at 0x7da1b00f5780>, <ast.Name object at 0x7da1b00f7fa0>]]]]
variable[num] assign[=] call[name[int], parameter[name[num]]]
if <ast.UnaryOp object at 0x7da1b00f7520> begin[:]
variable[choices] assign[=] call[name[np].linspace, parameter[name[min], name[max]]]
call[name[self].variables][name[name]] assign[=] call[name[EnumVariable], parameter[name[name], call[name[choices].tolist, parameter[]]]] | keyword[def] identifier[add_jump] ( identifier[self] , identifier[name] , identifier[min] , identifier[max] , identifier[num] , identifier[warp] = keyword[None] , identifier[var_type] = identifier[float] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[var_type] , identifier[type] ):
keyword[if] identifier[var_type] == literal[string] :
identifier[var_type] = identifier[int]
keyword[elif] identifier[var_type] == literal[string] :
identifier[var_type] = identifier[float]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %( identifier[var_type] ))
identifier[min] , identifier[max] = identifier[map] ( identifier[var_type] ,( identifier[min] , identifier[max] ))
identifier[num] = identifier[int] ( identifier[num] )
keyword[if] keyword[not] identifier[warp] :
identifier[choices] = identifier[np] . identifier[linspace] ( identifier[min] , identifier[max] , identifier[num] = identifier[num] , identifier[dtype] = identifier[var_type] )
keyword[elif] ( identifier[min] > literal[int] ) keyword[and] identifier[warp] == literal[string] :
identifier[choices] = identifier[np] . identifier[logspace] ( identifier[np] . identifier[log10] ( identifier[min] ), identifier[np] . identifier[log10] ( identifier[max] ), identifier[num] = identifier[num] ,
identifier[dtype] = identifier[var_type] )
keyword[elif] ( identifier[min] <= literal[int] ) keyword[and] identifier[warp] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %( identifier[name] , identifier[warp] ))
identifier[self] . identifier[variables] [ identifier[name] ]= identifier[EnumVariable] ( identifier[name] , identifier[choices] . identifier[tolist] ()) | def add_jump(self, name, min, max, num, warp=None, var_type=float):
""" An integer/float-valued enumerable with `num` items, bounded
between [`min`, `max`]. Note that the right endpoint of the interval
    includes `max`. This is a wrapper around add_enum; the enumerated
    values can be floats or ints, controlled by `var_type`.
"""
if not isinstance(var_type, type):
if var_type == 'int':
var_type = int # depends on [control=['if'], data=['var_type']]
elif var_type == 'float':
var_type = float # depends on [control=['if'], data=['var_type']]
else:
raise ValueError('var_type (%s) is not supported. use "int" or "float",' % var_type) # depends on [control=['if'], data=[]]
(min, max) = map(var_type, (min, max))
num = int(num)
if not warp:
choices = np.linspace(min, max, num=num, dtype=var_type) # depends on [control=['if'], data=[]]
elif min > 0 and warp == 'log':
choices = np.logspace(np.log10(min), np.log10(max), num=num, dtype=var_type) # depends on [control=['if'], data=[]]
elif min <= 0 and warp == 'log':
raise ValueError('variable %s: log-warping requires min > 0' % name) # depends on [control=['if'], data=[]]
else:
raise ValueError('variable %s: warp=%s is not supported. use None or "log",' % (name, warp))
self.variables[name] = EnumVariable(name, choices.tolist()) |
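Concrete outputs of the two spacing branches in the method above, which may help when choosing `warp`; the numbers are exact numpy results:

import numpy as np

# warp='log': choices are log-spaced between min and max (min must be > 0).
np.logspace(np.log10(1e-4), np.log10(1e-1), num=4)
# -> array([0.0001, 0.001, 0.01, 0.1])

# warp=None with var_type='int': choices are linear integers.
np.linspace(1, 4, num=4, dtype=int)
# -> array([1, 2, 3, 4])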
def calculate_gradient(self, batch_info, device, model, rollout):
""" Calculate loss of the supplied rollout """
assert isinstance(rollout, Trajectories), "ACER algorithm requires trajectory input"
local_epsilon = 1e-6
evaluator = model.evaluate(rollout)
actions = evaluator.get('rollout:actions')
rollout_probabilities = torch.exp(evaluator.get('rollout:logprobs'))
# We calculate the trust-region update with respect to the average model
if self.trust_region:
self.update_average_model(model)
logprobs = evaluator.get('model:logprobs')
q = evaluator.get('model:q')
# Selected action values
action_logprobs = select_indices(logprobs, actions)
action_q = select_indices(q, actions)
# We only want to propagate gradients through specific variables
with torch.no_grad():
model_probabilities = torch.exp(logprobs)
# Importance sampling correction - we must find the quotient of probabilities
rho = model_probabilities / (rollout_probabilities + local_epsilon)
# Probability quotient only for selected actions
actions_rho = select_indices(rho, actions)
# Calculate policy state values
model_state_values = (model_probabilities * q).sum(dim=1)
trajectory_rewards = rollout.transition_tensors['rewards']
trajectory_dones = rollout.transition_tensors['dones']
q_retraced = self.retrace(
trajectory_rewards,
trajectory_dones,
action_q.reshape(trajectory_rewards.size()),
model_state_values.reshape(trajectory_rewards.size()),
actions_rho.reshape(trajectory_rewards.size()),
rollout.rollout_tensors['final_values']
).flatten()
advantages = q_retraced - model_state_values
importance_sampling_coefficient = torch.min(actions_rho, self.rho_cap * torch.ones_like(actions_rho))
explained_variance = 1 - torch.var(q_retraced - action_q) / torch.var(q_retraced)
# Entropy of the policy distribution
policy_entropy = torch.mean(model.entropy(logprobs))
policy_gradient_loss = -torch.mean(advantages * importance_sampling_coefficient * action_logprobs)
# Policy gradient bias correction
with torch.no_grad():
advantages_bias_correction = q - model_state_values.view(model_probabilities.size(0), 1)
bias_correction_coefficient = F.relu(1.0 - self.rho_cap / (rho + local_epsilon))
# This sum is an expectation with respect to action probabilities according to model policy
policy_gradient_bias_correction_gain = torch.sum(
logprobs * bias_correction_coefficient * advantages_bias_correction * model_probabilities,
dim=1
)
policy_gradient_bias_correction_loss = - torch.mean(policy_gradient_bias_correction_gain)
policy_loss = policy_gradient_loss + policy_gradient_bias_correction_loss
q_function_loss = 0.5 * F.mse_loss(action_q, q_retraced)
if self.trust_region:
with torch.no_grad():
average_evaluator = self.average_model.evaluate(rollout)
average_action_logits = average_evaluator.get('model:logprobs')
actor_loss = policy_loss - self.entropy_coefficient * policy_entropy
q_loss = self.q_coefficient * q_function_loss
actor_gradient = torch.autograd.grad(-actor_loss, logprobs, retain_graph=True)[0]
# kl_divergence = model.kl_divergence(average_action_logits, action_logits).mean()
# kl_divergence_grad = torch.autograd.grad(kl_divergence, action_logits, retain_graph=True)
# Analytically calculated derivative of KL divergence on logits
# That makes it hardcoded for discrete action spaces
kl_divergence_grad_symbolic = - torch.exp(average_action_logits) / logprobs.size(0)
k_dot_g = (actor_gradient * kl_divergence_grad_symbolic).sum(dim=-1)
k_dot_k = (kl_divergence_grad_symbolic ** 2).sum(dim=-1)
adjustment = (k_dot_g - self.trust_region_delta) / k_dot_k
adjustment_clipped = adjustment.clamp(min=0.0)
actor_gradient_updated = actor_gradient - adjustment_clipped.view(adjustment_clipped.size(0), 1)
# Populate gradient from the newly updated fn
logprobs.backward(gradient=-actor_gradient_updated, retain_graph=True)
q_loss.backward(retain_graph=True)
else:
# Just populate gradient from the loss
loss = policy_loss + self.q_coefficient * q_function_loss - self.entropy_coefficient * policy_entropy
loss.backward()
return {
'policy_loss': policy_loss.item(),
'policy_gradient_loss': policy_gradient_loss.item(),
'policy_gradient_bias_correction': policy_gradient_bias_correction_loss.item(),
'avg_q_selected': action_q.mean().item(),
'avg_q_retraced': q_retraced.mean().item(),
'q_loss': q_function_loss.item(),
'policy_entropy': policy_entropy.item(),
'advantage_norm': torch.norm(advantages).item(),
'explained_variance': explained_variance.item(),
'model_prob_std': model_probabilities.std().item(),
'rollout_prob_std': rollout_probabilities.std().item()
} | def function[calculate_gradient, parameter[self, batch_info, device, model, rollout]]:
constant[ Calculate loss of the supplied rollout ]
assert[call[name[isinstance], parameter[name[rollout], name[Trajectories]]]]
variable[local_epsilon] assign[=] constant[1e-06]
variable[evaluator] assign[=] call[name[model].evaluate, parameter[name[rollout]]]
variable[actions] assign[=] call[name[evaluator].get, parameter[constant[rollout:actions]]]
variable[rollout_probabilities] assign[=] call[name[torch].exp, parameter[call[name[evaluator].get, parameter[constant[rollout:logprobs]]]]]
if name[self].trust_region begin[:]
call[name[self].update_average_model, parameter[name[model]]]
variable[logprobs] assign[=] call[name[evaluator].get, parameter[constant[model:logprobs]]]
variable[q] assign[=] call[name[evaluator].get, parameter[constant[model:q]]]
variable[action_logprobs] assign[=] call[name[select_indices], parameter[name[logprobs], name[actions]]]
variable[action_q] assign[=] call[name[select_indices], parameter[name[q], name[actions]]]
with call[name[torch].no_grad, parameter[]] begin[:]
variable[model_probabilities] assign[=] call[name[torch].exp, parameter[name[logprobs]]]
variable[rho] assign[=] binary_operation[name[model_probabilities] / binary_operation[name[rollout_probabilities] + name[local_epsilon]]]
variable[actions_rho] assign[=] call[name[select_indices], parameter[name[rho], name[actions]]]
variable[model_state_values] assign[=] call[binary_operation[name[model_probabilities] * name[q]].sum, parameter[]]
variable[trajectory_rewards] assign[=] call[name[rollout].transition_tensors][constant[rewards]]
variable[trajectory_dones] assign[=] call[name[rollout].transition_tensors][constant[dones]]
variable[q_retraced] assign[=] call[call[name[self].retrace, parameter[name[trajectory_rewards], name[trajectory_dones], call[name[action_q].reshape, parameter[call[name[trajectory_rewards].size, parameter[]]]], call[name[model_state_values].reshape, parameter[call[name[trajectory_rewards].size, parameter[]]]], call[name[actions_rho].reshape, parameter[call[name[trajectory_rewards].size, parameter[]]]], call[name[rollout].rollout_tensors][constant[final_values]]]].flatten, parameter[]]
variable[advantages] assign[=] binary_operation[name[q_retraced] - name[model_state_values]]
variable[importance_sampling_coefficient] assign[=] call[name[torch].min, parameter[name[actions_rho], binary_operation[name[self].rho_cap * call[name[torch].ones_like, parameter[name[actions_rho]]]]]]
variable[explained_variance] assign[=] binary_operation[constant[1] - binary_operation[call[name[torch].var, parameter[binary_operation[name[q_retraced] - name[action_q]]]] / call[name[torch].var, parameter[name[q_retraced]]]]]
variable[policy_entropy] assign[=] call[name[torch].mean, parameter[call[name[model].entropy, parameter[name[logprobs]]]]]
variable[policy_gradient_loss] assign[=] <ast.UnaryOp object at 0x7da1b1790b50>
with call[name[torch].no_grad, parameter[]] begin[:]
variable[advantages_bias_correction] assign[=] binary_operation[name[q] - call[name[model_state_values].view, parameter[call[name[model_probabilities].size, parameter[constant[0]]], constant[1]]]]
variable[bias_correction_coefficient] assign[=] call[name[F].relu, parameter[binary_operation[constant[1.0] - binary_operation[name[self].rho_cap / binary_operation[name[rho] + name[local_epsilon]]]]]]
variable[policy_gradient_bias_correction_gain] assign[=] call[name[torch].sum, parameter[binary_operation[binary_operation[binary_operation[name[logprobs] * name[bias_correction_coefficient]] * name[advantages_bias_correction]] * name[model_probabilities]]]]
variable[policy_gradient_bias_correction_loss] assign[=] <ast.UnaryOp object at 0x7da1b1791b70>
variable[policy_loss] assign[=] binary_operation[name[policy_gradient_loss] + name[policy_gradient_bias_correction_loss]]
variable[q_function_loss] assign[=] binary_operation[constant[0.5] * call[name[F].mse_loss, parameter[name[action_q], name[q_retraced]]]]
if name[self].trust_region begin[:]
with call[name[torch].no_grad, parameter[]] begin[:]
variable[average_evaluator] assign[=] call[name[self].average_model.evaluate, parameter[name[rollout]]]
variable[average_action_logits] assign[=] call[name[average_evaluator].get, parameter[constant[model:logprobs]]]
variable[actor_loss] assign[=] binary_operation[name[policy_loss] - binary_operation[name[self].entropy_coefficient * name[policy_entropy]]]
variable[q_loss] assign[=] binary_operation[name[self].q_coefficient * name[q_function_loss]]
variable[actor_gradient] assign[=] call[call[name[torch].autograd.grad, parameter[<ast.UnaryOp object at 0x7da1b1790040>, name[logprobs]]]][constant[0]]
variable[kl_divergence_grad_symbolic] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b1792410> / call[name[logprobs].size, parameter[constant[0]]]]
variable[k_dot_g] assign[=] call[binary_operation[name[actor_gradient] * name[kl_divergence_grad_symbolic]].sum, parameter[]]
variable[k_dot_k] assign[=] call[binary_operation[name[kl_divergence_grad_symbolic] ** constant[2]].sum, parameter[]]
variable[adjustment] assign[=] binary_operation[binary_operation[name[k_dot_g] - name[self].trust_region_delta] / name[k_dot_k]]
variable[adjustment_clipped] assign[=] call[name[adjustment].clamp, parameter[]]
variable[actor_gradient_updated] assign[=] binary_operation[name[actor_gradient] - call[name[adjustment_clipped].view, parameter[call[name[adjustment_clipped].size, parameter[constant[0]]], constant[1]]]]
call[name[logprobs].backward, parameter[]]
call[name[q_loss].backward, parameter[]]
return[dictionary[[<ast.Constant object at 0x7da204345780>, <ast.Constant object at 0x7da204346ef0>, <ast.Constant object at 0x7da204344b20>, <ast.Constant object at 0x7da204345b70>, <ast.Constant object at 0x7da2043479d0>, <ast.Constant object at 0x7da204345c30>, <ast.Constant object at 0x7da204344b50>, <ast.Constant object at 0x7da2043460b0>, <ast.Constant object at 0x7da204347cd0>, <ast.Constant object at 0x7da204347040>, <ast.Constant object at 0x7da204346e30>], [<ast.Call object at 0x7da204347d30>, <ast.Call object at 0x7da204345630>, <ast.Call object at 0x7da204346860>, <ast.Call object at 0x7da204347670>, <ast.Call object at 0x7da204346740>, <ast.Call object at 0x7da204344490>, <ast.Call object at 0x7da204346140>, <ast.Call object at 0x7da204345450>, <ast.Call object at 0x7da2043454b0>, <ast.Call object at 0x7da2043451e0>, <ast.Call object at 0x7da2043453c0>]]] | keyword[def] identifier[calculate_gradient] ( identifier[self] , identifier[batch_info] , identifier[device] , identifier[model] , identifier[rollout] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[rollout] , identifier[Trajectories] ), literal[string]
identifier[local_epsilon] = literal[int]
identifier[evaluator] = identifier[model] . identifier[evaluate] ( identifier[rollout] )
identifier[actions] = identifier[evaluator] . identifier[get] ( literal[string] )
identifier[rollout_probabilities] = identifier[torch] . identifier[exp] ( identifier[evaluator] . identifier[get] ( literal[string] ))
keyword[if] identifier[self] . identifier[trust_region] :
identifier[self] . identifier[update_average_model] ( identifier[model] )
identifier[logprobs] = identifier[evaluator] . identifier[get] ( literal[string] )
identifier[q] = identifier[evaluator] . identifier[get] ( literal[string] )
identifier[action_logprobs] = identifier[select_indices] ( identifier[logprobs] , identifier[actions] )
identifier[action_q] = identifier[select_indices] ( identifier[q] , identifier[actions] )
keyword[with] identifier[torch] . identifier[no_grad] ():
identifier[model_probabilities] = identifier[torch] . identifier[exp] ( identifier[logprobs] )
identifier[rho] = identifier[model_probabilities] /( identifier[rollout_probabilities] + identifier[local_epsilon] )
identifier[actions_rho] = identifier[select_indices] ( identifier[rho] , identifier[actions] )
identifier[model_state_values] =( identifier[model_probabilities] * identifier[q] ). identifier[sum] ( identifier[dim] = literal[int] )
identifier[trajectory_rewards] = identifier[rollout] . identifier[transition_tensors] [ literal[string] ]
identifier[trajectory_dones] = identifier[rollout] . identifier[transition_tensors] [ literal[string] ]
identifier[q_retraced] = identifier[self] . identifier[retrace] (
identifier[trajectory_rewards] ,
identifier[trajectory_dones] ,
identifier[action_q] . identifier[reshape] ( identifier[trajectory_rewards] . identifier[size] ()),
identifier[model_state_values] . identifier[reshape] ( identifier[trajectory_rewards] . identifier[size] ()),
identifier[actions_rho] . identifier[reshape] ( identifier[trajectory_rewards] . identifier[size] ()),
identifier[rollout] . identifier[rollout_tensors] [ literal[string] ]
). identifier[flatten] ()
identifier[advantages] = identifier[q_retraced] - identifier[model_state_values]
identifier[importance_sampling_coefficient] = identifier[torch] . identifier[min] ( identifier[actions_rho] , identifier[self] . identifier[rho_cap] * identifier[torch] . identifier[ones_like] ( identifier[actions_rho] ))
identifier[explained_variance] = literal[int] - identifier[torch] . identifier[var] ( identifier[q_retraced] - identifier[action_q] )/ identifier[torch] . identifier[var] ( identifier[q_retraced] )
identifier[policy_entropy] = identifier[torch] . identifier[mean] ( identifier[model] . identifier[entropy] ( identifier[logprobs] ))
identifier[policy_gradient_loss] =- identifier[torch] . identifier[mean] ( identifier[advantages] * identifier[importance_sampling_coefficient] * identifier[action_logprobs] )
keyword[with] identifier[torch] . identifier[no_grad] ():
identifier[advantages_bias_correction] = identifier[q] - identifier[model_state_values] . identifier[view] ( identifier[model_probabilities] . identifier[size] ( literal[int] ), literal[int] )
identifier[bias_correction_coefficient] = identifier[F] . identifier[relu] ( literal[int] - identifier[self] . identifier[rho_cap] /( identifier[rho] + identifier[local_epsilon] ))
identifier[policy_gradient_bias_correction_gain] = identifier[torch] . identifier[sum] (
identifier[logprobs] * identifier[bias_correction_coefficient] * identifier[advantages_bias_correction] * identifier[model_probabilities] ,
identifier[dim] = literal[int]
)
identifier[policy_gradient_bias_correction_loss] =- identifier[torch] . identifier[mean] ( identifier[policy_gradient_bias_correction_gain] )
identifier[policy_loss] = identifier[policy_gradient_loss] + identifier[policy_gradient_bias_correction_loss]
identifier[q_function_loss] = literal[int] * identifier[F] . identifier[mse_loss] ( identifier[action_q] , identifier[q_retraced] )
keyword[if] identifier[self] . identifier[trust_region] :
keyword[with] identifier[torch] . identifier[no_grad] ():
identifier[average_evaluator] = identifier[self] . identifier[average_model] . identifier[evaluate] ( identifier[rollout] )
identifier[average_action_logits] = identifier[average_evaluator] . identifier[get] ( literal[string] )
identifier[actor_loss] = identifier[policy_loss] - identifier[self] . identifier[entropy_coefficient] * identifier[policy_entropy]
identifier[q_loss] = identifier[self] . identifier[q_coefficient] * identifier[q_function_loss]
identifier[actor_gradient] = identifier[torch] . identifier[autograd] . identifier[grad] (- identifier[actor_loss] , identifier[logprobs] , identifier[retain_graph] = keyword[True] )[ literal[int] ]
identifier[kl_divergence_grad_symbolic] =- identifier[torch] . identifier[exp] ( identifier[average_action_logits] )/ identifier[logprobs] . identifier[size] ( literal[int] )
identifier[k_dot_g] =( identifier[actor_gradient] * identifier[kl_divergence_grad_symbolic] ). identifier[sum] ( identifier[dim] =- literal[int] )
identifier[k_dot_k] =( identifier[kl_divergence_grad_symbolic] ** literal[int] ). identifier[sum] ( identifier[dim] =- literal[int] )
identifier[adjustment] =( identifier[k_dot_g] - identifier[self] . identifier[trust_region_delta] )/ identifier[k_dot_k]
identifier[adjustment_clipped] = identifier[adjustment] . identifier[clamp] ( identifier[min] = literal[int] )
identifier[actor_gradient_updated] = identifier[actor_gradient] - identifier[adjustment_clipped] . identifier[view] ( identifier[adjustment_clipped] . identifier[size] ( literal[int] ), literal[int] )
identifier[logprobs] . identifier[backward] ( identifier[gradient] =- identifier[actor_gradient_updated] , identifier[retain_graph] = keyword[True] )
identifier[q_loss] . identifier[backward] ( identifier[retain_graph] = keyword[True] )
keyword[else] :
identifier[loss] = identifier[policy_loss] + identifier[self] . identifier[q_coefficient] * identifier[q_function_loss] - identifier[self] . identifier[entropy_coefficient] * identifier[policy_entropy]
identifier[loss] . identifier[backward] ()
keyword[return] {
literal[string] : identifier[policy_loss] . identifier[item] (),
literal[string] : identifier[policy_gradient_loss] . identifier[item] (),
literal[string] : identifier[policy_gradient_bias_correction_loss] . identifier[item] (),
literal[string] : identifier[action_q] . identifier[mean] (). identifier[item] (),
literal[string] : identifier[q_retraced] . identifier[mean] (). identifier[item] (),
literal[string] : identifier[q_function_loss] . identifier[item] (),
literal[string] : identifier[policy_entropy] . identifier[item] (),
literal[string] : identifier[torch] . identifier[norm] ( identifier[advantages] ). identifier[item] (),
literal[string] : identifier[explained_variance] . identifier[item] (),
literal[string] : identifier[model_probabilities] . identifier[std] (). identifier[item] (),
literal[string] : identifier[rollout_probabilities] . identifier[std] (). identifier[item] ()
} | def calculate_gradient(self, batch_info, device, model, rollout):
""" Calculate loss of the supplied rollout """
assert isinstance(rollout, Trajectories), 'ACER algorithm requires trajectory input'
local_epsilon = 1e-06
evaluator = model.evaluate(rollout)
actions = evaluator.get('rollout:actions')
rollout_probabilities = torch.exp(evaluator.get('rollout:logprobs'))
# We calculate the trust-region update with respect to the average model
if self.trust_region:
self.update_average_model(model) # depends on [control=['if'], data=[]]
logprobs = evaluator.get('model:logprobs')
q = evaluator.get('model:q')
# Selected action values
action_logprobs = select_indices(logprobs, actions)
action_q = select_indices(q, actions)
# We only want to propagate gradients through specific variables
with torch.no_grad():
model_probabilities = torch.exp(logprobs)
# Importance sampling correction - we must find the quotient of probabilities
rho = model_probabilities / (rollout_probabilities + local_epsilon)
# Probability quotient only for selected actions
actions_rho = select_indices(rho, actions)
# Calculate policy state values
model_state_values = (model_probabilities * q).sum(dim=1)
trajectory_rewards = rollout.transition_tensors['rewards']
trajectory_dones = rollout.transition_tensors['dones']
q_retraced = self.retrace(trajectory_rewards, trajectory_dones, action_q.reshape(trajectory_rewards.size()), model_state_values.reshape(trajectory_rewards.size()), actions_rho.reshape(trajectory_rewards.size()), rollout.rollout_tensors['final_values']).flatten()
advantages = q_retraced - model_state_values
importance_sampling_coefficient = torch.min(actions_rho, self.rho_cap * torch.ones_like(actions_rho))
explained_variance = 1 - torch.var(q_retraced - action_q) / torch.var(q_retraced) # depends on [control=['with'], data=[]]
# Entropy of the policy distribution
policy_entropy = torch.mean(model.entropy(logprobs))
policy_gradient_loss = -torch.mean(advantages * importance_sampling_coefficient * action_logprobs)
# Policy gradient bias correction
with torch.no_grad():
advantages_bias_correction = q - model_state_values.view(model_probabilities.size(0), 1)
bias_correction_coefficient = F.relu(1.0 - self.rho_cap / (rho + local_epsilon)) # depends on [control=['with'], data=[]]
# This sum is an expectation with respect to action probabilities according to model policy
policy_gradient_bias_correction_gain = torch.sum(logprobs * bias_correction_coefficient * advantages_bias_correction * model_probabilities, dim=1)
policy_gradient_bias_correction_loss = -torch.mean(policy_gradient_bias_correction_gain)
policy_loss = policy_gradient_loss + policy_gradient_bias_correction_loss
q_function_loss = 0.5 * F.mse_loss(action_q, q_retraced)
if self.trust_region:
with torch.no_grad():
average_evaluator = self.average_model.evaluate(rollout)
average_action_logits = average_evaluator.get('model:logprobs') # depends on [control=['with'], data=[]]
actor_loss = policy_loss - self.entropy_coefficient * policy_entropy
q_loss = self.q_coefficient * q_function_loss
actor_gradient = torch.autograd.grad(-actor_loss, logprobs, retain_graph=True)[0]
# kl_divergence = model.kl_divergence(average_action_logits, action_logits).mean()
# kl_divergence_grad = torch.autograd.grad(kl_divergence, action_logits, retain_graph=True)
# Analytically calculated derivative of KL divergence on logits
# That makes it hardcoded for discrete action spaces
kl_divergence_grad_symbolic = -torch.exp(average_action_logits) / logprobs.size(0)
k_dot_g = (actor_gradient * kl_divergence_grad_symbolic).sum(dim=-1)
k_dot_k = (kl_divergence_grad_symbolic ** 2).sum(dim=-1)
adjustment = (k_dot_g - self.trust_region_delta) / k_dot_k
adjustment_clipped = adjustment.clamp(min=0.0)
actor_gradient_updated = actor_gradient - adjustment_clipped.view(adjustment_clipped.size(0), 1)
# Populate gradient from the newly updated fn
logprobs.backward(gradient=-actor_gradient_updated, retain_graph=True)
q_loss.backward(retain_graph=True) # depends on [control=['if'], data=[]]
else:
# Just populate gradient from the loss
loss = policy_loss + self.q_coefficient * q_function_loss - self.entropy_coefficient * policy_entropy
loss.backward()
return {'policy_loss': policy_loss.item(), 'policy_gradient_loss': policy_gradient_loss.item(), 'policy_gradient_bias_correction': policy_gradient_bias_correction_loss.item(), 'avg_q_selected': action_q.mean().item(), 'avg_q_retraced': q_retraced.mean().item(), 'q_loss': q_function_loss.item(), 'policy_entropy': policy_entropy.item(), 'advantage_norm': torch.norm(advantages).item(), 'explained_variance': explained_variance.item(), 'model_prob_std': model_probabilities.std().item(), 'rollout_prob_std': rollout_probabilities.std().item()} |
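The `self.retrace` call in the row above is not shown here; a minimal standalone sketch of the Retrace target it is expected to compute for ACER, with [T, B]-shaped inputs and a hypothetical discount factor:

import torch

def retrace_targets(rewards, dones, q_taken, values, rho_taken,
                    final_values, discount=0.99, rho_cap=1.0):
    """Backward recursion for the off-policy corrected return."""
    rho_bar = torch.clamp(rho_taken, max=rho_cap)
    q_ret = final_values                  # bootstrap from the final state
    targets = torch.zeros_like(rewards)
    for t in reversed(range(rewards.size(0))):
        q_ret = rewards[t] + discount * (1.0 - dones[t]) * q_ret
        targets[t] = q_ret
        # Pull the propagated return toward the critic between steps.
        q_ret = rho_bar[t] * (q_ret - q_taken[t]) + values[t]
    return targets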
def set_lcm_config(config_mode=None,
config_mode_freq=None,
refresh_freq=None,
reboot_if_needed=None,
action_after_reboot=None,
refresh_mode=None,
certificate_id=None,
configuration_id=None,
allow_module_overwrite=None,
debug_mode=False,
status_retention_days=None):
'''
For detailed descriptions of the parameters see:
https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig
config_mode (str): How the LCM applies the configuration. Valid values
are:
- ApplyOnly
- ApplyAndMonitor
- ApplyAndAutoCorrect
config_mode_freq (int): How often, in minutes, the current configuration
is checked and applied. Ignored if config_mode is set to ApplyOnly.
Default is 15.
refresh_mode (str): How the LCM gets configurations. Valid values are:
- Disabled
- Push
- Pull
refresh_freq (int): How often, in minutes, the LCM checks for updated
configurations. (pull mode only) Default is 30.
reboot_if_needed (bool): Reboot the machine if needed after a
configuration is applied. Default is False.
action_after_reboot (str): Action to take after reboot. Valid values
are:
- ContinueConfiguration
- StopConfiguration
certificate_id (guid): A GUID that specifies a certificate used to
        access the configuration. (pull mode)
configuration_id (guid): A GUID that identifies the config file to get
from a pull server. (pull mode)
allow_module_overwrite (bool): New configs are allowed to overwrite old
ones on the target node.
debug_mode (str): Sets the debug level. Valid values are:
- None
- ForceModuleImport
- All
status_retention_days (int): Number of days to keep status of the
current config.
.. note::
Either ``config_mode_freq`` or ``refresh_freq`` needs to be a
multiple of the other. See documentation on MSDN for more details.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' dsc.set_lcm_config ApplyOnly
'''
temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR')))
cmd = 'Configuration SaltConfig {'
cmd += ' Node localhost {'
cmd += ' LocalConfigurationManager {'
if config_mode:
if config_mode not in ('ApplyOnly', 'ApplyAndMonitor',
'ApplyAndAutoCorrect'):
error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \
'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode)
raise SaltInvocationError(error)
cmd += ' ConfigurationMode = "{0}";'.format(config_mode)
if config_mode_freq:
if not isinstance(config_mode_freq, int):
error = 'config_mode_freq must be an integer. Passed {0}'.format(
config_mode_freq
)
raise SaltInvocationError(error)
cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq)
if refresh_mode:
if refresh_mode not in ('Disabled', 'Push', 'Pull'):
raise SaltInvocationError(
'refresh_mode must be one of Disabled, Push, or Pull'
)
cmd += ' RefreshMode = "{0}";'.format(refresh_mode)
if refresh_freq:
if not isinstance(refresh_freq, int):
raise SaltInvocationError('refresh_freq must be an integer')
cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq)
if reboot_if_needed is not None:
if not isinstance(reboot_if_needed, bool):
raise SaltInvocationError('reboot_if_needed must be a boolean value')
if reboot_if_needed:
reboot_if_needed = '$true'
else:
reboot_if_needed = '$false'
cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed)
if action_after_reboot:
if action_after_reboot not in ('ContinueConfiguration',
'StopConfiguration'):
raise SaltInvocationError(
'action_after_reboot must be one of '
'ContinueConfiguration or StopConfiguration'
)
        cmd += ' ActionAfterReboot = "{0}";'.format(action_after_reboot)
if certificate_id is not None:
if certificate_id == '':
certificate_id = None
cmd += ' CertificateID = "{0}";'.format(certificate_id)
if configuration_id is not None:
if configuration_id == '':
configuration_id = None
cmd += ' ConfigurationID = "{0}";'.format(configuration_id)
if allow_module_overwrite is not None:
if not isinstance(allow_module_overwrite, bool):
raise SaltInvocationError('allow_module_overwrite must be a boolean value')
if allow_module_overwrite:
allow_module_overwrite = '$true'
else:
allow_module_overwrite = '$false'
cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite)
if debug_mode is not False:
if debug_mode is None:
debug_mode = 'None'
if debug_mode not in ('None', 'ForceModuleImport', 'All'):
raise SaltInvocationError(
                'debug_mode must be one of None, ForceModuleImport, or All'
)
cmd += ' DebugMode = "{0}";'.format(debug_mode)
if status_retention_days:
if not isinstance(status_retention_days, int):
raise SaltInvocationError('status_retention_days must be an integer')
cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days)
cmd += ' }}};'
cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir)
# Execute Config to create the .mof
_pshell(cmd)
# Apply the config
cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \
r''.format(temp_dir)
ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
__salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir))
if not ret['retcode']:
log.info('DSC: LCM config applied successfully')
return True
else:
log.error('DSC: Failed to apply LCM config. Error %s', ret)
return False | def function[set_lcm_config, parameter[config_mode, config_mode_freq, refresh_freq, reboot_if_needed, action_after_reboot, refresh_mode, certificate_id, configuration_id, allow_module_overwrite, debug_mode, status_retention_days]]:
constant[
For detailed descriptions of the parameters see:
https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig
config_mode (str): How the LCM applies the configuration. Valid values
are:
- ApplyOnly
- ApplyAndMonitor
- ApplyAndAutoCorrect
config_mode_freq (int): How often, in minutes, the current configuration
is checked and applied. Ignored if config_mode is set to ApplyOnly.
Default is 15.
refresh_mode (str): How the LCM gets configurations. Valid values are:
- Disabled
- Push
- Pull
refresh_freq (int): How often, in minutes, the LCM checks for updated
configurations. (pull mode only) Default is 30.
reboot_if_needed (bool): Reboot the machine if needed after a
configuration is applied. Default is False.
action_after_reboot (str): Action to take after reboot. Valid values
are:
- ContinueConfiguration
- StopConfiguration
certificate_id (guid): A GUID that specifies a certificate used to
access the configuration. (pull mode)
configuration_id (guid): A GUID that identifies the config file to get
from a pull server. (pull mode)
allow_module_overwrite (bool): New configs are allowed to overwrite old
ones on the target node.
debug_mode (str): Sets the debug level. Valid values are:
- None
- ForceModuleImport
- All
status_retention_days (int): Number of days to keep status of the
current config.
.. note::
Either ``config_mode_freq`` or ``refresh_freq`` needs to be a
multiple of the other. See documentation on MSDN for more details.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' dsc.set_lcm_config ApplyOnly
]
variable[temp_dir] assign[=] call[name[os].getenv, parameter[constant[TEMP], call[constant[{0}\temp].format, parameter[call[name[os].getenv, parameter[constant[WINDIR]]]]]]]
variable[cmd] assign[=] constant[Configuration SaltConfig {]
<ast.AugAssign object at 0x7da207f9b010>
<ast.AugAssign object at 0x7da207f9ae60>
if name[config_mode] begin[:]
if compare[name[config_mode] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da207f98910>, <ast.Constant object at 0x7da207f98940>, <ast.Constant object at 0x7da207f9a740>]]] begin[:]
variable[error] assign[=] call[constant[config_mode must be one of ApplyOnly, ApplyAndMonitor, or ApplyAndAutoCorrect. Passed {0}].format, parameter[name[config_mode]]]
<ast.Raise object at 0x7da207f9af50>
<ast.AugAssign object at 0x7da207f99db0>
if name[config_mode_freq] begin[:]
if <ast.UnaryOp object at 0x7da207f99330> begin[:]
variable[error] assign[=] call[constant[config_mode_freq must be an integer. Passed {0}].format, parameter[name[config_mode_freq]]]
<ast.Raise object at 0x7da207f98b20>
<ast.AugAssign object at 0x7da207f9a0b0>
if name[refresh_mode] begin[:]
if compare[name[refresh_mode] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da207f98280>, <ast.Constant object at 0x7da207f99270>, <ast.Constant object at 0x7da207f98c40>]]] begin[:]
<ast.Raise object at 0x7da207f99090>
<ast.AugAssign object at 0x7da207f98bb0>
if name[refresh_freq] begin[:]
if <ast.UnaryOp object at 0x7da207f99420> begin[:]
<ast.Raise object at 0x7da207f9b2e0>
<ast.AugAssign object at 0x7da207f9a680>
if compare[name[reboot_if_needed] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da207f98730> begin[:]
<ast.Raise object at 0x7da207f9b640>
if name[reboot_if_needed] begin[:]
variable[reboot_if_needed] assign[=] constant[$true]
<ast.AugAssign object at 0x7da207f9acb0>
if name[action_after_reboot] begin[:]
if compare[name[action_after_reboot] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da207f985b0>, <ast.Constant object at 0x7da207f9b4f0>]]] begin[:]
<ast.Raise object at 0x7da207f984c0>
<ast.AugAssign object at 0x7da207f98370>
if compare[name[certificate_id] is_not constant[None]] begin[:]
if compare[name[certificate_id] equal[==] constant[]] begin[:]
variable[certificate_id] assign[=] constant[None]
<ast.AugAssign object at 0x7da207f9a410>
if compare[name[configuration_id] is_not constant[None]] begin[:]
if compare[name[configuration_id] equal[==] constant[]] begin[:]
variable[configuration_id] assign[=] constant[None]
<ast.AugAssign object at 0x7da207f9a2c0>
if compare[name[allow_module_overwrite] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da207f994b0> begin[:]
<ast.Raise object at 0x7da207f9ae30>
if name[allow_module_overwrite] begin[:]
variable[allow_module_overwrite] assign[=] constant[$true]
<ast.AugAssign object at 0x7da20c76e920>
if compare[name[debug_mode] is_not constant[False]] begin[:]
if compare[name[debug_mode] is constant[None]] begin[:]
variable[debug_mode] assign[=] constant[None]
if compare[name[debug_mode] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c76df90>, <ast.Constant object at 0x7da20c76e620>, <ast.Constant object at 0x7da20c76c8e0>]]] begin[:]
<ast.Raise object at 0x7da20c76c700>
<ast.AugAssign object at 0x7da20c76ded0>
if name[status_retention_days] begin[:]
if <ast.UnaryOp object at 0x7da20c76ec20> begin[:]
<ast.Raise object at 0x7da20c76d240>
<ast.AugAssign object at 0x7da20c76c5e0>
<ast.AugAssign object at 0x7da20c76e1a0>
<ast.AugAssign object at 0x7da20c76efe0>
call[name[_pshell], parameter[name[cmd]]]
variable[cmd] assign[=] call[constant[Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"].format, parameter[name[temp_dir]]]
variable[ret] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
call[call[name[__salt__]][constant[file.remove]], parameter[call[constant[{0}\SaltConfig].format, parameter[name[temp_dir]]]]]
if <ast.UnaryOp object at 0x7da20c76ec80> begin[:]
call[name[log].info, parameter[constant[DSC: LCM config applied successfully]]]
return[constant[True]] | keyword[def] identifier[set_lcm_config] ( identifier[config_mode] = keyword[None] ,
identifier[config_mode_freq] = keyword[None] ,
identifier[refresh_freq] = keyword[None] ,
identifier[reboot_if_needed] = keyword[None] ,
identifier[action_after_reboot] = keyword[None] ,
identifier[refresh_mode] = keyword[None] ,
identifier[certificate_id] = keyword[None] ,
identifier[configuration_id] = keyword[None] ,
identifier[allow_module_overwrite] = keyword[None] ,
identifier[debug_mode] = keyword[False] ,
identifier[status_retention_days] = keyword[None] ):
literal[string]
identifier[temp_dir] = identifier[os] . identifier[getenv] ( literal[string] , literal[string] . identifier[format] ( identifier[os] . identifier[getenv] ( literal[string] )))
identifier[cmd] = literal[string]
identifier[cmd] += literal[string]
identifier[cmd] += literal[string]
keyword[if] identifier[config_mode] :
keyword[if] identifier[config_mode] keyword[not] keyword[in] ( literal[string] , literal[string] ,
literal[string] ):
identifier[error] = literal[string] literal[string] . identifier[format] ( identifier[config_mode] )
keyword[raise] identifier[SaltInvocationError] ( identifier[error] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[config_mode] )
keyword[if] identifier[config_mode_freq] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[config_mode_freq] , identifier[int] ):
identifier[error] = literal[string] . identifier[format] (
identifier[config_mode_freq]
)
keyword[raise] identifier[SaltInvocationError] ( identifier[error] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[config_mode_freq] )
keyword[if] identifier[refresh_mode] :
keyword[if] identifier[refresh_mode] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[SaltInvocationError] (
literal[string]
)
identifier[cmd] += literal[string] . identifier[format] ( identifier[refresh_mode] )
keyword[if] identifier[refresh_freq] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[refresh_freq] , identifier[int] ):
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[refresh_freq] )
keyword[if] identifier[reboot_if_needed] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[reboot_if_needed] , identifier[bool] ):
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
keyword[if] identifier[reboot_if_needed] :
identifier[reboot_if_needed] = literal[string]
keyword[else] :
identifier[reboot_if_needed] = literal[string]
identifier[cmd] += literal[string] . identifier[format] ( identifier[reboot_if_needed] )
keyword[if] identifier[action_after_reboot] :
keyword[if] identifier[action_after_reboot] keyword[not] keyword[in] ( literal[string] ,
literal[string] ):
keyword[raise] identifier[SaltInvocationError] (
literal[string]
literal[string]
)
identifier[cmd] += literal[string] . identifier[format] ( identifier[action_after_reboot] )
keyword[if] identifier[certificate_id] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[certificate_id] == literal[string] :
identifier[certificate_id] = keyword[None]
identifier[cmd] += literal[string] . identifier[format] ( identifier[certificate_id] )
keyword[if] identifier[configuration_id] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[configuration_id] == literal[string] :
identifier[configuration_id] = keyword[None]
identifier[cmd] += literal[string] . identifier[format] ( identifier[configuration_id] )
keyword[if] identifier[allow_module_overwrite] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[allow_module_overwrite] , identifier[bool] ):
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
keyword[if] identifier[allow_module_overwrite] :
identifier[allow_module_overwrite] = literal[string]
keyword[else] :
identifier[allow_module_overwrite] = literal[string]
identifier[cmd] += literal[string] . identifier[format] ( identifier[allow_module_overwrite] )
keyword[if] identifier[debug_mode] keyword[is] keyword[not] keyword[False] :
keyword[if] identifier[debug_mode] keyword[is] keyword[None] :
identifier[debug_mode] = literal[string]
keyword[if] identifier[debug_mode] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[SaltInvocationError] (
literal[string]
literal[string]
)
identifier[cmd] += literal[string] . identifier[format] ( identifier[debug_mode] )
keyword[if] identifier[status_retention_days] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[status_retention_days] , identifier[int] ):
keyword[raise] identifier[SaltInvocationError] ( literal[string] )
identifier[cmd] += literal[string] . identifier[format] ( identifier[status_retention_days] )
identifier[cmd] += literal[string]
identifier[cmd] += literal[string] . identifier[format] ( identifier[temp_dir] )
identifier[_pshell] ( identifier[cmd] )
identifier[cmd] = literal[string] literal[string] . identifier[format] ( identifier[temp_dir] )
identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[shell] = literal[string] , identifier[python_shell] = keyword[True] )
identifier[__salt__] [ literal[string] ]( literal[string] . identifier[format] ( identifier[temp_dir] ))
keyword[if] keyword[not] identifier[ret] [ literal[string] ]:
identifier[log] . identifier[info] ( literal[string] )
keyword[return] keyword[True]
keyword[else] :
identifier[log] . identifier[error] ( literal[string] , identifier[ret] )
keyword[return] keyword[False] | def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None):
"""
For detailed descriptions of the parameters see:
https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig
config_mode (str): How the LCM applies the configuration. Valid values
are:
- ApplyOnly
- ApplyAndMonitor
- ApplyAndAutoCorrect
config_mode_freq (int): How often, in minutes, the current configuration
is checked and applied. Ignored if config_mode is set to ApplyOnly.
Default is 15.
refresh_mode (str): How the LCM gets configurations. Valid values are:
- Disabled
- Push
- Pull
refresh_freq (int): How often, in minutes, the LCM checks for updated
configurations. (pull mode only) Default is 30.
reboot_if_needed (bool): Reboot the machine if needed after a
configuration is applied. Default is False.
action_after_reboot (str): Action to take after reboot. Valid values
are:
- ContinueConfiguration
- StopConfiguration
certificate_id (guid): A GUID that specifies a certificate used to
access the configuration: (pull mode)
configuration_id (guid): A GUID that identifies the config file to get
from a pull server. (pull mode)
allow_module_overwrite (bool): New configs are allowed to overwrite old
ones on the target node.
debug_mode (str): Sets the debug level. Valid values are:
- None
- ForceModuleImport
- All
status_retention_days (int): Number of days to keep status of the
current config.
.. note::
Either ``config_mode_freq`` or ``refresh_freq`` needs to be a
multiple of the other. See documentation on MSDN for more details.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' dsc.set_lcm_config ApplyOnly
"""
temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR')))
cmd = 'Configuration SaltConfig {'
cmd += ' Node localhost {'
cmd += ' LocalConfigurationManager {'
if config_mode:
if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'):
error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, or ApplyAndAutoCorrect. Passed {0}'.format(config_mode)
raise SaltInvocationError(error) # depends on [control=['if'], data=['config_mode']]
cmd += ' ConfigurationMode = "{0}";'.format(config_mode) # depends on [control=['if'], data=[]]
if config_mode_freq:
if not isinstance(config_mode_freq, int):
error = 'config_mode_freq must be an integer. Passed {0}'.format(config_mode_freq)
raise SaltInvocationError(error) # depends on [control=['if'], data=[]]
cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) # depends on [control=['if'], data=[]]
if refresh_mode:
if refresh_mode not in ('Disabled', 'Push', 'Pull'):
raise SaltInvocationError('refresh_mode must be one of Disabled, Push, or Pull') # depends on [control=['if'], data=[]]
cmd += ' RefreshMode = "{0}";'.format(refresh_mode) # depends on [control=['if'], data=[]]
if refresh_freq:
if not isinstance(refresh_freq, int):
raise SaltInvocationError('refresh_freq must be an integer') # depends on [control=['if'], data=[]]
cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) # depends on [control=['if'], data=[]]
if reboot_if_needed is not None:
if not isinstance(reboot_if_needed, bool):
raise SaltInvocationError('reboot_if_needed must be a boolean value') # depends on [control=['if'], data=[]]
if reboot_if_needed:
reboot_if_needed = '$true' # depends on [control=['if'], data=[]]
else:
reboot_if_needed = '$false'
cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) # depends on [control=['if'], data=['reboot_if_needed']]
if action_after_reboot:
if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'):
raise SaltInvocationError('action_after_reboot must be one of ContinueConfiguration or StopConfiguration') # depends on [control=['if'], data=[]]
cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) # depends on [control=['if'], data=[]]
if certificate_id is not None:
if certificate_id == '':
certificate_id = None # depends on [control=['if'], data=['certificate_id']]
cmd += ' CertificateID = "{0}";'.format(certificate_id) # depends on [control=['if'], data=['certificate_id']]
if configuration_id is not None:
if configuration_id == '':
configuration_id = None # depends on [control=['if'], data=['configuration_id']]
cmd += ' ConfigurationID = "{0}";'.format(configuration_id) # depends on [control=['if'], data=['configuration_id']]
if allow_module_overwrite is not None:
if not isinstance(allow_module_overwrite, bool):
raise SaltInvocationError('allow_module_overwrite must be a boolean value') # depends on [control=['if'], data=[]]
if allow_module_overwrite:
allow_module_overwrite = '$true' # depends on [control=['if'], data=[]]
else:
allow_module_overwrite = '$false'
cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) # depends on [control=['if'], data=['allow_module_overwrite']]
if debug_mode is not False:
if debug_mode is None:
debug_mode = 'None' # depends on [control=['if'], data=['debug_mode']]
if debug_mode not in ('None', 'ForceModuleImport', 'All'):
raise SaltInvocationError('debug_mode must be one of None, ForceModuleImport, ResourceScriptBreakAll, or All') # depends on [control=['if'], data=[]]
cmd += ' DebugMode = "{0}";'.format(debug_mode) # depends on [control=['if'], data=['debug_mode']]
if status_retention_days:
if not isinstance(status_retention_days, int):
raise SaltInvocationError('status_retention_days must be an integer') # depends on [control=['if'], data=[]]
cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) # depends on [control=['if'], data=[]]
cmd += ' }}};'
cmd += 'SaltConfig -OutputPath "{0}\\SaltConfig"'.format(temp_dir)
# Execute Config to create the .mof
_pshell(cmd)
# Apply the config
cmd = 'Set-DscLocalConfigurationManager -Path "{0}\\SaltConfig"'.format(temp_dir)
ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
__salt__['file.remove']('{0}\\SaltConfig'.format(temp_dir))
if not ret['retcode']:
log.info('DSC: LCM config applied successfully')
return True # depends on [control=['if'], data=[]]
else:
log.error('DSC: Failed to apply LCM config. Error %s', ret)
return False |
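A minimal, hedged sketch of the docstring's note above (not part of the Salt module itself): the two polling intervals are compatible only when one is a multiple of the other. The helper name is illustrative.
def _freqs_compatible(config_mode_freq, refresh_freq):
    # True when either interval evenly divides the other, per the note above
    return config_mode_freq % refresh_freq == 0 or refresh_freq % config_mode_freq == 0

assert _freqs_compatible(15, 30)        # 30 is a multiple of 15: accepted
assert not _freqs_compatible(15, 40)    # neither divides the other: rejected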
def _get_boxes(pos, size=None, margin=0, keep_aspect_ratio=True):
"""Generate non-overlapping boxes in NDC from a set of positions."""
# Get x, y.
pos = np.asarray(pos, dtype=np.float64)
x, y = pos.T
x = x[:, np.newaxis]
y = y[:, np.newaxis]
w, h = size if size is not None else _get_box_size(x, y, margin=margin)
x0, y0 = x - w, y - h
x1, y1 = x + w, y + h
# Renormalize the whole thing by keeping the aspect ratio.
x0min, y0min, x1max, y1max = x0.min(), y0.min(), x1.max(), y1.max()
if not keep_aspect_ratio:
b = (x0min, y0min, x1max, y1max)
else:
dx = x1max - x0min
dy = y1max - y0min
if dx > dy:
b = (x0min, (y1max + y0min) / 2. - dx / 2.,
x1max, (y1max + y0min) / 2. + dx / 2.)
else:
b = ((x1max + x0min) / 2. - dy / 2., y0min,
(x1max + x0min) / 2. + dy / 2., y1max)
r = Range(from_bounds=b,
to_bounds=(-1, -1, 1, 1))
return np.c_[r.apply(np.c_[x0, y0]), r.apply(np.c_[x1, y1])] | def function[_get_boxes, parameter[pos, size, margin, keep_aspect_ratio]]:
constant[Generate non-overlapping boxes in NDC from a set of positions.]
variable[pos] assign[=] call[name[np].asarray, parameter[name[pos]]]
<ast.Tuple object at 0x7da1b13860b0> assign[=] name[pos].T
variable[x] assign[=] call[name[x]][tuple[[<ast.Slice object at 0x7da1b1387e80>, <ast.Attribute object at 0x7da1b1386b60>]]]
variable[y] assign[=] call[name[y]][tuple[[<ast.Slice object at 0x7da1b13843a0>, <ast.Attribute object at 0x7da1b1385d20>]]]
<ast.Tuple object at 0x7da1b1384160> assign[=] <ast.IfExp object at 0x7da1b1385420>
<ast.Tuple object at 0x7da1b1385570> assign[=] tuple[[<ast.BinOp object at 0x7da1b13876d0>, <ast.BinOp object at 0x7da1b1387250>]]
<ast.Tuple object at 0x7da1b1387cd0> assign[=] tuple[[<ast.BinOp object at 0x7da1b1384430>, <ast.BinOp object at 0x7da1b1385f00>]]
<ast.Tuple object at 0x7da1b1385b40> assign[=] tuple[[<ast.Call object at 0x7da1b1387550>, <ast.Call object at 0x7da1b1385f60>, <ast.Call object at 0x7da1b1386cb0>, <ast.Call object at 0x7da1b1385870>]]
if <ast.UnaryOp object at 0x7da1b1384640> begin[:]
variable[b] assign[=] tuple[[<ast.Name object at 0x7da1b1385fc0>, <ast.Name object at 0x7da1b1385d80>, <ast.Name object at 0x7da1b1386f80>, <ast.Name object at 0x7da1b1387dc0>]]
variable[r] assign[=] call[name[Range], parameter[]]
return[call[name[np].c_][tuple[[<ast.Call object at 0x7da2044c0b20>, <ast.Call object at 0x7da1b13e5540>]]]] | keyword[def] identifier[_get_boxes] ( identifier[pos] , identifier[size] = keyword[None] , identifier[margin] = literal[int] , identifier[keep_aspect_ratio] = keyword[True] ):
literal[string]
identifier[pos] = identifier[np] . identifier[asarray] ( identifier[pos] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[x] , identifier[y] = identifier[pos] . identifier[T]
identifier[x] = identifier[x] [:, identifier[np] . identifier[newaxis] ]
identifier[y] = identifier[y] [:, identifier[np] . identifier[newaxis] ]
identifier[w] , identifier[h] = identifier[size] keyword[if] identifier[size] keyword[is] keyword[not] keyword[None] keyword[else] identifier[_get_box_size] ( identifier[x] , identifier[y] , identifier[margin] = identifier[margin] )
identifier[x0] , identifier[y0] = identifier[x] - identifier[w] , identifier[y] - identifier[h]
identifier[x1] , identifier[y1] = identifier[x] + identifier[w] , identifier[y] + identifier[h]
identifier[x0min] , identifier[y0min] , identifier[x1max] , identifier[y1max] = identifier[x0] . identifier[min] (), identifier[y0] . identifier[min] (), identifier[x1] . identifier[max] (), identifier[y1] . identifier[max] ()
keyword[if] keyword[not] identifier[keep_aspect_ratio] :
identifier[b] =( identifier[x0min] , identifier[y0min] , identifier[x1max] , identifier[y1max] )
keyword[else] :
identifier[dx] = identifier[x1max] - identifier[x0min]
identifier[dy] = identifier[y1max] - identifier[y0min]
keyword[if] identifier[dx] > identifier[dy] :
identifier[b] =( identifier[x0min] ,( identifier[y1max] + identifier[y0min] )/ literal[int] - identifier[dx] / literal[int] ,
identifier[x1max] ,( identifier[y1max] + identifier[y0min] )/ literal[int] + identifier[dx] / literal[int] )
keyword[else] :
identifier[b] =(( identifier[x1max] + identifier[x0min] )/ literal[int] - identifier[dy] / literal[int] , identifier[y0min] ,
( identifier[x1max] + identifier[x0min] )/ literal[int] + identifier[dy] / literal[int] , identifier[y1max] )
identifier[r] = identifier[Range] ( identifier[from_bounds] = identifier[b] ,
identifier[to_bounds] =(- literal[int] ,- literal[int] , literal[int] , literal[int] ))
keyword[return] identifier[np] . identifier[c_] [ identifier[r] . identifier[apply] ( identifier[np] . identifier[c_] [ identifier[x0] , identifier[y0] ]), identifier[r] . identifier[apply] ( identifier[np] . identifier[c_] [ identifier[x1] , identifier[y1] ])] | def _get_boxes(pos, size=None, margin=0, keep_aspect_ratio=True):
"""Generate non-overlapping boxes in NDC from a set of positions."""
# Get x, y.
pos = np.asarray(pos, dtype=np.float64)
(x, y) = pos.T
x = x[:, np.newaxis]
y = y[:, np.newaxis]
(w, h) = size if size is not None else _get_box_size(x, y, margin=margin)
(x0, y0) = (x - w, y - h)
(x1, y1) = (x + w, y + h)
# Renormalize the whole thing by keeping the aspect ratio.
(x0min, y0min, x1max, y1max) = (x0.min(), y0.min(), x1.max(), y1.max())
if not keep_aspect_ratio:
b = (x0min, y0min, x1max, y1max) # depends on [control=['if'], data=[]]
else:
dx = x1max - x0min
dy = y1max - y0min
if dx > dy:
b = (x0min, (y1max + y0min) / 2.0 - dx / 2.0, x1max, (y1max + y0min) / 2.0 + dx / 2.0) # depends on [control=['if'], data=['dx']]
else:
b = ((x1max + x0min) / 2.0 - dy / 2.0, y0min, (x1max + x0min) / 2.0 + dy / 2.0, y1max)
r = Range(from_bounds=b, to_bounds=(-1, -1, 1, 1))
return np.c_[r.apply(np.c_[x0, y0]), r.apply(np.c_[x1, y1])] |
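A hypothetical call to _get_boxes, assuming numpy plus the module-level Range and _get_box_size helpers referenced above are importable; each input point yields one (x0, y0, x1, y1) box normalized into [-1, 1] NDC.
import numpy as np

pos = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 1.0]])
boxes = _get_boxes(pos, size=(0.2, 0.1))   # fixed half-width/half-height per box
print(boxes.shape)                         # expected: (3, 4), one box per point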
def load_any_file(filename):
"""
Attempts to load filename by trial-and-error
Returns:
file: A DataFile descendant, whose specific class depends on the file format detected, or None
if the file cannot be loaded
"""
import f311
# Splits attempts using ((binary X text) file) criterion
if a99.is_text_file(filename):
return load_with_classes(filename, f311.classes_txt())
else:
return load_with_classes(filename, f311.classes_bin()) | def function[load_any_file, parameter[filename]]:
constant[
Attempts to load filename by trial-and-error
Returns:
file: A DataFile descendant, whose specific class depends on the file format detected, or None
if the file cannot be loaded
]
import module[f311]
if call[name[a99].is_text_file, parameter[name[filename]]] begin[:]
return[call[name[load_with_classes], parameter[name[filename], call[name[f311].classes_txt, parameter[]]]]] | keyword[def] identifier[load_any_file] ( identifier[filename] ):
literal[string]
keyword[import] identifier[f311]
keyword[if] identifier[a99] . identifier[is_text_file] ( identifier[filename] ):
keyword[return] identifier[load_with_classes] ( identifier[filename] , identifier[f311] . identifier[classes_txt] ())
keyword[else] :
keyword[return] identifier[load_with_classes] ( identifier[filename] , identifier[f311] . identifier[classes_bin] ()) | def load_any_file(filename):
"""
Attempts to load filename by trial-and-error
Returns:
file: A DataFile descendant, whose specific class depends on the file format detected, or None
if the file cannot be loaded
"""
import f311
# Splits attempts using ((binary X text) file) criterion
if a99.is_text_file(filename):
return load_with_classes(filename, f311.classes_txt()) # depends on [control=['if'], data=[]]
else:
return load_with_classes(filename, f311.classes_bin()) |
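A hypothetical usage sketch; 'some_file.dat' is a placeholder path. The helper returns a DataFile descendant on success, or None when no registered class can parse the file.
f = load_any_file('some_file.dat')
print('unrecognized file' if f is None else type(f).__name__)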
def get_meta_type_by_name(name):
data = get_default_metadata_data()
child_data = get_child_metadata_data()
for item in data["metadataObjects"]:
if 'xmlName' in item and item['xmlName'] == name:
return item
for item in child_data:
if 'xmlName' in item and item['xmlName'] == name:
return item
'''
> quick and dirty fix for users experiencing issues with "newer" metadata types not properly tested by mm
> if the project has a cached .describe, let's use that to detect metadata types
'''
try:
if config.describe_data != None:
project_org_describe = config.describe_data
if config.project != None and os.path.isfile(os.path.join(config.project.location,'config','.describe')):
project_org_describe = parse_json_from_file(os.path.join(config.project.location,'config','.describe'))
if project_org_describe != None and 'metadataObjects' in project_org_describe:
for item in project_org_describe["metadataObjects"]:
if 'xmlName' in item and item['xmlName'] == name:
return item
except:
pass | def function[get_meta_type_by_name, parameter[name]]:
variable[data] assign[=] call[name[get_default_metadata_data], parameter[]]
variable[child_data] assign[=] call[name[get_child_metadata_data], parameter[]]
for taget[name[item]] in starred[call[name[data]][constant[metadataObjects]]] begin[:]
if <ast.BoolOp object at 0x7da1b28c6200> begin[:]
return[name[item]]
for taget[name[item]] in starred[name[child_data]] begin[:]
if <ast.BoolOp object at 0x7da2041da6e0> begin[:]
return[name[item]]
constant[
> quick and dirty fix for users experiencing issues with "newer" metadata types not properly tested by mm
> if the project has a cached .describe, let's use that to detect metadata types
]
<ast.Try object at 0x7da2041d9ba0> | keyword[def] identifier[get_meta_type_by_name] ( identifier[name] ):
identifier[data] = identifier[get_default_metadata_data] ()
identifier[child_data] = identifier[get_child_metadata_data] ()
keyword[for] identifier[item] keyword[in] identifier[data] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[item] keyword[and] identifier[item] [ literal[string] ]== identifier[name] :
keyword[return] identifier[item]
keyword[for] identifier[item] keyword[in] identifier[child_data] :
keyword[if] literal[string] keyword[in] identifier[item] keyword[and] identifier[item] [ literal[string] ]== identifier[name] :
keyword[return] identifier[item]
literal[string]
keyword[try] :
keyword[if] identifier[config] . identifier[describe_data] != keyword[None] :
identifier[project_org_describe] = identifier[config] . identifier[describe_data]
keyword[if] identifier[config] . identifier[project] != keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[config] . identifier[project] . identifier[location] , literal[string] , literal[string] )):
identifier[project_org_describe] = identifier[parse_json_from_file] ( identifier[os] . identifier[path] . identifier[join] ( identifier[config] . identifier[project] . identifier[location] , literal[string] , literal[string] ))
keyword[if] identifier[project_org_describe] != keyword[None] keyword[and] literal[string] keyword[in] identifier[project_org_describe] :
keyword[for] identifier[item] keyword[in] identifier[project_org_describe] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[item] keyword[and] identifier[item] [ literal[string] ]== identifier[name] :
keyword[return] identifier[item]
keyword[except] :
keyword[pass] | def get_meta_type_by_name(name):
data = get_default_metadata_data()
child_data = get_child_metadata_data()
for item in data['metadataObjects']:
if 'xmlName' in item and item['xmlName'] == name:
return item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
for item in child_data:
if 'xmlName' in item and item['xmlName'] == name:
return item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
'\n > quick and dirty fix for users experiencing issues with "newer" metadata types not properly tested by mm\n > if the project has a cached .describe, let\'s use that to detect metadata types\n '
try:
if config.describe_data != None:
project_org_describe = config.describe_data # depends on [control=['if'], data=[]]
if config.project != None and os.path.isfile(os.path.join(config.project.location, 'config', '.describe')):
project_org_describe = parse_json_from_file(os.path.join(config.project.location, 'config', '.describe')) # depends on [control=['if'], data=[]]
if project_org_describe != None and 'metadataObjects' in project_org_describe:
for item in project_org_describe['metadataObjects']:
if 'xmlName' in item and item['xmlName'] == name:
return item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] |
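A hypothetical lookup; 'ApexClass' is a standard Salesforce metadata xmlName, and the keys read from the result are assumptions based on typical describeMetadata entries.
item = get_meta_type_by_name('ApexClass')
if item is not None:
    # directoryName/suffix are typical describe keys; absent keys fall back to None
    print(item['xmlName'], item.get('directoryName'), item.get('suffix'))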
def difference_update(self, other):
"""Update self to include only the difference with other."""
other = set(other)
indices_to_delete = set()
for i, elem in enumerate(self):
if elem in other:
indices_to_delete.add(i)
if indices_to_delete:
self._delete_values_by_index(indices_to_delete) | def function[difference_update, parameter[self, other]]:
constant[Update self to include only the difference with other.]
variable[other] assign[=] call[name[set], parameter[name[other]]]
variable[indices_to_delete] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b26a7b20>, <ast.Name object at 0x7da1b26a7d30>]]] in starred[call[name[enumerate], parameter[name[self]]]] begin[:]
if compare[name[elem] in name[other]] begin[:]
call[name[indices_to_delete].add, parameter[name[i]]]
if name[indices_to_delete] begin[:]
call[name[self]._delete_values_by_index, parameter[name[indices_to_delete]]] | keyword[def] identifier[difference_update] ( identifier[self] , identifier[other] ):
literal[string]
identifier[other] = identifier[set] ( identifier[other] )
identifier[indices_to_delete] = identifier[set] ()
keyword[for] identifier[i] , identifier[elem] keyword[in] identifier[enumerate] ( identifier[self] ):
keyword[if] identifier[elem] keyword[in] identifier[other] :
identifier[indices_to_delete] . identifier[add] ( identifier[i] )
keyword[if] identifier[indices_to_delete] :
identifier[self] . identifier[_delete_values_by_index] ( identifier[indices_to_delete] ) | def difference_update(self, other):
"""Update self to include only the difference with other."""
other = set(other)
indices_to_delete = set()
for (i, elem) in enumerate(self):
if elem in other:
indices_to_delete.add(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if indices_to_delete:
self._delete_values_by_index(indices_to_delete) # depends on [control=['if'], data=[]] |
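A minimal illustration of the contract, assuming a hypothetical TrackedList container that exposes the method above: elements also present in other are deleted in place by index.
coll = TrackedList([1, 2, 3, 4])   # TrackedList is a placeholder, not defined here
coll.difference_update([2, 4, 9])
print(list(coll))                  # expected: [1, 3]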
def _set_ldp_protocol_stats_instance_total(self, v, load=False):
"""
Setter method for ldp_protocol_stats_instance_total, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_total (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_protocol_stats_instance_total is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_protocol_stats_instance_total() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ldp_protocol_stats_instance_total.ldp_protocol_stats_instance_total, is_container='container', presence=False, yang_name="ldp-protocol-stats-instance-total", rest_name="ldp-protocol-stats-instance-total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-stats-instance-ldp-protocol-stats-instance-total-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ldp_protocol_stats_instance_total must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ldp_protocol_stats_instance_total.ldp_protocol_stats_instance_total, is_container='container', presence=False, yang_name="ldp-protocol-stats-instance-total", rest_name="ldp-protocol-stats-instance-total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-stats-instance-ldp-protocol-stats-instance-total-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__ldp_protocol_stats_instance_total = t
if hasattr(self, '_set'):
self._set() | def function[_set_ldp_protocol_stats_instance_total, parameter[self, v, load]]:
constant[
Setter method for ldp_protocol_stats_instance_total, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_total (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_protocol_stats_instance_total is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_protocol_stats_instance_total() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f00ff40>
name[self].__ldp_protocol_stats_instance_total assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_ldp_protocol_stats_instance_total] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[ldp_protocol_stats_instance_total] . identifier[ldp_protocol_stats_instance_total] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[False] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__ldp_protocol_stats_instance_total] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_ldp_protocol_stats_instance_total(self, v, load=False):
"""
Setter method for ldp_protocol_stats_instance_total, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_total (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_protocol_stats_instance_total is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_protocol_stats_instance_total() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=ldp_protocol_stats_instance_total.ldp_protocol_stats_instance_total, is_container='container', presence=False, yang_name='ldp-protocol-stats-instance-total', rest_name='ldp-protocol-stats-instance-total', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-stats-instance-ldp-protocol-stats-instance-total-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'ldp_protocol_stats_instance_total must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=ldp_protocol_stats_instance_total.ldp_protocol_stats_instance_total, is_container=\'container\', presence=False, yang_name="ldp-protocol-stats-instance-total", rest_name="ldp-protocol-stats-instance-total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'mpls-ldp-protocol-stats-instance-ldp-protocol-stats-instance-total-1\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls-operational\', defining_module=\'brocade-mpls-operational\', yang_type=\'container\', is_config=False)'}) # depends on [control=['except'], data=[]]
self.__ldp_protocol_stats_instance_total = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
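A hedged sketch only: per the docstring, backends populate this config-false node by calling the private setter on a generated binding instance. Both names below are placeholders for pyangbind-style objects, not identifiers defined in this document.
node = mpls_state().ldp.statistics                         # hypothetical generated binding
node._set_ldp_protocol_stats_instance_total(stats_value)   # stats_value: hypothetical container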
def analysis(self):
"""Get musical analysis of the song using the librosa library
"""
if self._analysis is not None:
return self._analysis
if self.cache_dir is not None:
path = os.path.join(self.cache_dir, self.checksum)
try:
if self.refresh_cache: raise IOError
with open(path + '.pickle', 'rb') as pickle_file:
self._analysis = pickle.load(pickle_file)
except IOError:
self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
with open(path + '.pickle', 'wb') as pickle_file:
pickle.dump(self._analysis, pickle_file, pickle.HIGHEST_PROTOCOL)
else:
self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
return self._analysis | def function[analysis, parameter[self]]:
constant[Get musical analysis of the song using the librosa library
]
if compare[name[self]._analysis is_not constant[None]] begin[:]
return[name[self]._analysis]
if compare[name[self].cache_dir is_not constant[None]] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[self].cache_dir, name[self].checksum]]
<ast.Try object at 0x7da1b2346bc0>
return[name[self]._analysis] | keyword[def] identifier[analysis] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_analysis] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_analysis]
keyword[if] identifier[self] . identifier[cache_dir] keyword[is] keyword[not] keyword[None] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[cache_dir] , identifier[self] . identifier[checksum] )
keyword[try] :
keyword[if] identifier[self] . identifier[refresh_cache] : keyword[raise] identifier[IOError]
keyword[with] identifier[open] ( identifier[path] + literal[string] , literal[string] ) keyword[as] identifier[pickle_file] :
identifier[self] . identifier[_analysis] = identifier[pickle] . identifier[load] ( identifier[pickle_file] )
keyword[except] identifier[IOError] :
identifier[self] . identifier[_analysis] = identifier[librosa_analysis] . identifier[analyze_frames] ( identifier[self] . identifier[all_as_mono] (), identifier[self] . identifier[samplerate] )
keyword[with] identifier[open] ( identifier[path] + literal[string] , literal[string] ) keyword[as] identifier[pickle_file] :
identifier[pickle] . identifier[dump] ( identifier[self] . identifier[_analysis] , identifier[pickle_file] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] )
keyword[else] :
identifier[self] . identifier[_analysis] = identifier[librosa_analysis] . identifier[analyze_frames] ( identifier[self] . identifier[all_as_mono] (), identifier[self] . identifier[samplerate] )
keyword[return] identifier[self] . identifier[_analysis] | def analysis(self):
"""Get musical analysis of the song using the librosa library
"""
if self._analysis is not None:
return self._analysis # depends on [control=['if'], data=[]]
if self.cache_dir is not None:
path = os.path.join(self.cache_dir, self.checksum)
try:
if self.refresh_cache:
raise IOError # depends on [control=['if'], data=[]]
with open(path + '.pickle', 'rb') as pickle_file:
self._analysis = pickle.load(pickle_file) # depends on [control=['with'], data=['pickle_file']] # depends on [control=['try'], data=[]]
except IOError:
self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
with open(path + '.pickle', 'wb') as pickle_file:
pickle.dump(self._analysis, pickle_file, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['pickle_file']] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
return self._analysis |
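Hypothetical usage of the caching logic above (Song stands in for whichever class defines analysis): the first call runs the librosa analysis and writes <cache_dir>/<checksum>.pickle, and later calls with refresh_cache unset reuse the cached result.
song = Song('track.wav', cache_dir='/tmp/audio_cache')  # Song is a placeholder class name
first = song.analysis()    # computes, then pickles the frames analysis
second = song.analysis()   # returns the in-memory copy without recomputing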
def json_http_resp(handler):
"""
Automatically serialize return value to the body of a successful HTTP
response.
Returns a 500 error if the response cannot be serialized
Usage::
>>> from lambda_decorators import json_http_resp
>>> @json_http_resp
... def handler(event, context):
... return {'hello': 'world'}
>>> handler({}, object())
{'statusCode': 200, 'body': '{"hello": "world"}'}
in this example, the decorated handler returns:
.. code:: python
{'statusCode': 200, 'body': '{"hello": "world"}'}
"""
@wraps(handler)
def wrapper(event, context):
response = handler(event, context)
try:
body = json.dumps(response)
except Exception as exception:
return {'statusCode': 500, 'body': str(exception)}
return {'statusCode': 200, 'body': body}
return wrapper | def function[json_http_resp, parameter[handler]]:
constant[
Automatically serialize return value to the body of a successful HTTP
response.
Returns a 500 error if the response cannot be serialized
Usage::
>>> from lambda_decorators import json_http_resp
>>> @json_http_resp
... def handler(event, context):
... return {'hello': 'world'}
>>> handler({}, object())
{'statusCode': 200, 'body': '{"hello": "world"}'}
in this example, the decorated handler returns:
.. code:: python
{'statusCode': 200, 'body': '{"hello": "world"}'}
]
def function[wrapper, parameter[event, context]]:
variable[response] assign[=] call[name[handler], parameter[name[event], name[context]]]
<ast.Try object at 0x7da18ede72e0>
return[dictionary[[<ast.Constant object at 0x7da18ede4df0>, <ast.Constant object at 0x7da18ede46d0>], [<ast.Constant object at 0x7da18ede6200>, <ast.Name object at 0x7da18ede43a0>]]]
return[name[wrapper]] | keyword[def] identifier[json_http_resp] ( identifier[handler] ):
literal[string]
@ identifier[wraps] ( identifier[handler] )
keyword[def] identifier[wrapper] ( identifier[event] , identifier[context] ):
identifier[response] = identifier[handler] ( identifier[event] , identifier[context] )
keyword[try] :
identifier[body] = identifier[json] . identifier[dumps] ( identifier[response] )
keyword[except] identifier[Exception] keyword[as] identifier[exception] :
keyword[return] { literal[string] : literal[int] , literal[string] : identifier[str] ( identifier[exception] )}
keyword[return] { literal[string] : literal[int] , literal[string] : identifier[body] }
keyword[return] identifier[wrapper] | def json_http_resp(handler):
"""
Automatically serialize return value to the body of a successful HTTP
response.
Returns a 500 error if the response cannot be serialized
Usage::
>>> from lambda_decorators import json_http_resp
>>> @json_http_resp
... def handler(event, context):
... return {'hello': 'world'}
>>> handler({}, object())
{'statusCode': 200, 'body': '{"hello": "world"}'}
in this example, the decorated handler returns:
.. code:: python
{'statusCode': 200, 'body': '{"hello": "world"}'}
"""
@wraps(handler)
def wrapper(event, context):
response = handler(event, context)
try:
body = json.dumps(response) # depends on [control=['try'], data=[]]
except Exception as exception:
return {'statusCode': 500, 'body': str(exception)} # depends on [control=['except'], data=['exception']]
return {'statusCode': 200, 'body': body}
return wrapper |
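A complement to the doctest above, using the same decorator: a return value that json.dumps cannot serialize is converted into a 500 response by the except branch.
@json_http_resp
def broken_handler(event, context):
    return {'when': object()}   # object() is not JSON-serializable

print(broken_handler({}, None)['statusCode'])   # expected: 500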
def glyph(dataset, orient=True, scale=True, factor=1.0, geom=None):
"""
Copies a geometric representation (called a glyph) to every
point in the input dataset. The glyph may be oriented along
the input vectors, and it may be scaled according to scalar
data or vector magnitude.
Parameters
----------
orient : bool
Use the active vectors array to orient the glyphs
scale : bool
Use the active scalars to scale the glyphs
factor : float
Scale factor applied to the scaling array
geom : vtk.vtkDataSet
The geometry to use for the glyph
"""
if geom is None:
arrow = vtk.vtkArrowSource()
arrow.Update()
geom = arrow.GetOutput()
alg = vtk.vtkGlyph3D()
alg.SetSourceData(geom)
if isinstance(scale, str):
dataset.active_scalar_name = scale
scale = True
if scale:
if dataset.active_scalar is not None:
if dataset.active_scalar.ndim > 1:
alg.SetScaleModeToScaleByVector()
else:
alg.SetScaleModeToScaleByScalar()
if isinstance(orient, str):
dataset.active_vectors_name = orient
orient = True
alg.SetOrient(orient)
alg.SetInputData(dataset)
alg.SetVectorModeToUseVector()
alg.SetScaleFactor(factor)
alg.Update()
return _get_output(alg) | def function[glyph, parameter[dataset, orient, scale, factor, geom]]:
constant[
Copies a geometric representation (called a glyph) to every
point in the input dataset. The glyph may be oriented along
the input vectors, and it may be scaled according to scalar
data or vector magnitude.
Parameters
----------
orient : bool
Use the active vectors array to orient the glyphs
scale : bool
Use the active scalars to scale the glyphs
factor : float
Scale factor applied to the scaling array
geom : vtk.vtkDataSet
The geometry to use for the glyph
]
if compare[name[geom] is constant[None]] begin[:]
variable[arrow] assign[=] call[name[vtk].vtkArrowSource, parameter[]]
call[name[arrow].Update, parameter[]]
variable[geom] assign[=] call[name[arrow].GetOutput, parameter[]]
variable[alg] assign[=] call[name[vtk].vtkGlyph3D, parameter[]]
call[name[alg].SetSourceData, parameter[name[geom]]]
if call[name[isinstance], parameter[name[scale], name[str]]] begin[:]
name[dataset].active_scalar_name assign[=] name[scale]
variable[scale] assign[=] constant[True]
if name[scale] begin[:]
if compare[name[dataset].active_scalar is_not constant[None]] begin[:]
if compare[name[dataset].active_scalar.ndim greater[>] constant[1]] begin[:]
call[name[alg].SetScaleModeToScaleByVector, parameter[]]
if call[name[isinstance], parameter[name[orient], name[str]]] begin[:]
name[dataset].active_vectors_name assign[=] name[orient]
variable[orient] assign[=] constant[True]
call[name[alg].SetOrient, parameter[name[orient]]]
call[name[alg].SetInputData, parameter[name[dataset]]]
call[name[alg].SetVectorModeToUseVector, parameter[]]
call[name[alg].SetScaleFactor, parameter[name[factor]]]
call[name[alg].Update, parameter[]]
return[call[name[_get_output], parameter[name[alg]]]] | keyword[def] identifier[glyph] ( identifier[dataset] , identifier[orient] = keyword[True] , identifier[scale] = keyword[True] , identifier[factor] = literal[int] , identifier[geom] = keyword[None] ):
literal[string]
keyword[if] identifier[geom] keyword[is] keyword[None] :
identifier[arrow] = identifier[vtk] . identifier[vtkArrowSource] ()
identifier[arrow] . identifier[Update] ()
identifier[geom] = identifier[arrow] . identifier[GetOutput] ()
identifier[alg] = identifier[vtk] . identifier[vtkGlyph3D] ()
identifier[alg] . identifier[SetSourceData] ( identifier[geom] )
keyword[if] identifier[isinstance] ( identifier[scale] , identifier[str] ):
identifier[dataset] . identifier[active_scalar_name] = identifier[scale]
identifier[scale] = keyword[True]
keyword[if] identifier[scale] :
keyword[if] identifier[dataset] . identifier[active_scalar] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[dataset] . identifier[active_scalar] . identifier[ndim] > literal[int] :
identifier[alg] . identifier[SetScaleModeToScaleByVector] ()
keyword[else] :
identifier[alg] . identifier[SetScaleModeToScaleByScalar] ()
keyword[if] identifier[isinstance] ( identifier[orient] , identifier[str] ):
identifier[dataset] . identifier[active_vectors_name] = identifier[orient]
identifier[orient] = keyword[True]
identifier[alg] . identifier[SetOrient] ( identifier[orient] )
identifier[alg] . identifier[SetInputData] ( identifier[dataset] )
identifier[alg] . identifier[SetVectorModeToUseVector] ()
identifier[alg] . identifier[SetScaleFactor] ( identifier[factor] )
identifier[alg] . identifier[Update] ()
keyword[return] identifier[_get_output] ( identifier[alg] ) | def glyph(dataset, orient=True, scale=True, factor=1.0, geom=None):
"""
Copies a geometric representation (called a glyph) to every
point in the input dataset. The glyph may be oriented along
the input vectors, and it may be scaled according to scalar
data or vector magnitude.
Parameters
----------
orient : bool
Use the active vectors array to orient the the glyphs
scale : bool
Use the active scalars to scale the glyphs
factor : float
Scale factor applied to sclaing array
geom : vtk.vtkDataSet
The geometry to use for the glyph
"""
if geom is None:
arrow = vtk.vtkArrowSource()
arrow.Update()
geom = arrow.GetOutput() # depends on [control=['if'], data=['geom']]
alg = vtk.vtkGlyph3D()
alg.SetSourceData(geom)
if isinstance(scale, str):
dataset.active_scalar_name = scale
scale = True # depends on [control=['if'], data=[]]
if scale:
if dataset.active_scalar is not None:
if dataset.active_scalar.ndim > 1:
alg.SetScaleModeToScaleByVector() # depends on [control=['if'], data=[]]
else:
alg.SetScaleModeToScaleByScalar() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if isinstance(orient, str):
dataset.active_vectors_name = orient
orient = True # depends on [control=['if'], data=[]]
alg.SetOrient(orient)
alg.SetInputData(dataset)
alg.SetVectorModeToUseVector()
alg.SetScaleFactor(factor)
alg.Update()
return _get_output(alg) |
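A hypothetical usage with pyvista-style inputs, which is the object API this filter appears to target; the point array name 'vec' and the pyvista import are assumptions.
import numpy as np
import pyvista as pv

cloud = pv.PolyData(np.random.rand(25, 3))    # 25 random points
cloud['vec'] = np.random.rand(25, 3) - 0.5    # per-point orientation vectors
arrows = glyph(cloud, orient='vec', scale=False, factor=0.2)
print(arrows.n_points > 0)                    # expected: True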
def QoI_Dist(poly, dist, sample=10000, **kws):
"""
Constructs distributions for the quantities of interest.
The function constructs a kernel density estimator (KDE) for each
polynomial (poly) by sampling it. With the KDEs, distributions (Dists) are
constructed. The Dists can be used for e.g. plotting probability density
functions (PDF), or to make a second uncertainty quantification simulation
with the newly generated Dists.
Args:
poly (Poly):
Polynomial of interest.
dist (Dist):
Defines the space where the samples for the KDE are taken from the
poly.
sample (int):
Number of samples used in estimation to construct the KDE.
Returns:
(numpy.ndarray):
The constructed quantity of interest (QoI) distributions, where
``qoi_dists.shape==poly.shape``.
Examples:
>>> dist = chaospy.Normal(0, 1)
>>> x = chaospy.variable(1)
>>> poly = chaospy.Poly([x])
>>> qoi_dist = chaospy.QoI_Dist(poly, dist)
>>> values = qoi_dist[0].pdf([-0.75, 0., 0.75])
>>> print(numpy.around(values, 8))
[0.29143037 0.39931708 0.29536329]
"""
shape = poly.shape
poly = polynomials.flatten(poly)
dim = len(dist)
#sample from the input dist
samples = dist.sample(sample, **kws)
qoi_dists = []
for i in range(0, len(poly)):
#sample the polynomial solution
if dim == 1:
dataset = poly[i](samples)
else:
dataset = poly[i](*samples)
lo = dataset.min()
up = dataset.max()
#creates qoi_dist
qoi_dist = distributions.SampleDist(dataset, lo, up)
qoi_dists.append(qoi_dist)
#reshape the qoi_dists to match the shape of the input poly
qoi_dists = numpy.array(qoi_dists, distributions.Dist)
qoi_dists = qoi_dists.reshape(shape)
if not shape:
qoi_dists = qoi_dists.item()
return qoi_dists | def function[QoI_Dist, parameter[poly, dist, sample]]:
constant[
Constructs distributions for the quantities of interest.
The function constructs a kernel density estimator (KDE) for each
polynomial (poly) by sampling it. With the KDEs, distributions (Dists) are
constructed. The Dists can be used for e.g. plotting probability density
functions (PDF), or to make a second uncertainty quantification simulation
with the newly generated Dists.
Args:
poly (Poly):
Polynomial of interest.
dist (Dist):
Defines the space where the samples for the KDE are taken from the
poly.
sample (int):
Number of samples used in estimation to construct the KDE.
Returns:
(numpy.ndarray):
The constructed quantity of interest (QoI) distributions, where
``qoi_dists.shape==poly.shape``.
Examples:
>>> dist = chaospy.Normal(0, 1)
>>> x = chaospy.variable(1)
>>> poly = chaospy.Poly([x])
>>> qoi_dist = chaospy.QoI_Dist(poly, dist)
>>> values = qoi_dist[0].pdf([-0.75, 0., 0.75])
>>> print(numpy.around(values, 8))
[0.29143037 0.39931708 0.29536329]
]
variable[shape] assign[=] name[poly].shape
variable[poly] assign[=] call[name[polynomials].flatten, parameter[name[poly]]]
variable[dim] assign[=] call[name[len], parameter[name[dist]]]
variable[samples] assign[=] call[name[dist].sample, parameter[name[sample]]]
variable[qoi_dists] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[poly]]]]]] begin[:]
if compare[name[dim] equal[==] constant[1]] begin[:]
variable[dataset] assign[=] call[call[name[poly]][name[i]], parameter[name[samples]]]
variable[lo] assign[=] call[name[dataset].min, parameter[]]
variable[up] assign[=] call[name[dataset].max, parameter[]]
variable[qoi_dist] assign[=] call[name[distributions].SampleDist, parameter[name[dataset], name[lo], name[up]]]
call[name[qoi_dists].append, parameter[name[qoi_dist]]]
variable[qoi_dists] assign[=] call[name[numpy].array, parameter[name[qoi_dists], name[distributions].Dist]]
variable[qoi_dists] assign[=] call[name[qoi_dists].reshape, parameter[name[shape]]]
if <ast.UnaryOp object at 0x7da207f02d70> begin[:]
variable[qoi_dists] assign[=] call[name[qoi_dists].item, parameter[]]
return[name[qoi_dists]] | keyword[def] identifier[QoI_Dist] ( identifier[poly] , identifier[dist] , identifier[sample] = literal[int] ,** identifier[kws] ):
literal[string]
identifier[shape] = identifier[poly] . identifier[shape]
identifier[poly] = identifier[polynomials] . identifier[flatten] ( identifier[poly] )
identifier[dim] = identifier[len] ( identifier[dist] )
identifier[samples] = identifier[dist] . identifier[sample] ( identifier[sample] ,** identifier[kws] )
identifier[qoi_dists] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[poly] )):
keyword[if] identifier[dim] == literal[int] :
identifier[dataset] = identifier[poly] [ identifier[i] ]( identifier[samples] )
keyword[else] :
identifier[dataset] = identifier[poly] [ identifier[i] ](* identifier[samples] )
identifier[lo] = identifier[dataset] . identifier[min] ()
identifier[up] = identifier[dataset] . identifier[max] ()
identifier[qoi_dist] = identifier[distributions] . identifier[SampleDist] ( identifier[dataset] , identifier[lo] , identifier[up] )
identifier[qoi_dists] . identifier[append] ( identifier[qoi_dist] )
identifier[qoi_dists] = identifier[numpy] . identifier[array] ( identifier[qoi_dists] , identifier[distributions] . identifier[Dist] )
identifier[qoi_dists] = identifier[qoi_dists] . identifier[reshape] ( identifier[shape] )
keyword[if] keyword[not] identifier[shape] :
identifier[qoi_dists] = identifier[qoi_dists] . identifier[item] ()
keyword[return] identifier[qoi_dists] | def QoI_Dist(poly, dist, sample=10000, **kws):
"""
Constructs distributions for the quantities of interest.
The function constructs a kernel density estimator (KDE) for each
polynomial (poly) by sampling it. With the KDEs, distributions (Dists) are
constructed. The Dists can be used for e.g. plotting probability density
functions (PDF), or to make a second uncertainty quantification simulation
with the newly generated Dists.
Args:
poly (Poly):
Polynomial of interest.
dist (Dist):
Defines the space where the samples for the KDE are taken from the
poly.
sample (int):
Number of samples used in estimation to construct the KDE.
Returns:
(numpy.ndarray):
The constructed quantity of interest (QoI) distributions, where
``qoi_dists.shape==poly.shape``.
Examples:
>>> dist = chaospy.Normal(0, 1)
>>> x = chaospy.variable(1)
>>> poly = chaospy.Poly([x])
>>> qoi_dist = chaospy.QoI_Dist(poly, dist)
>>> values = qoi_dist[0].pdf([-0.75, 0., 0.75])
>>> print(numpy.around(values, 8))
[0.29143037 0.39931708 0.29536329]
"""
shape = poly.shape
poly = polynomials.flatten(poly)
dim = len(dist)
#sample from the input dist
samples = dist.sample(sample, **kws)
qoi_dists = []
for i in range(0, len(poly)):
#sample the polynomial solution
if dim == 1:
dataset = poly[i](samples) # depends on [control=['if'], data=[]]
else:
dataset = poly[i](*samples)
lo = dataset.min()
up = dataset.max()
#creates qoi_dist
qoi_dist = distributions.SampleDist(dataset, lo, up)
qoi_dists.append(qoi_dist) # depends on [control=['for'], data=['i']]
#reshape the qoi_dists to match the shape of the input poly
qoi_dists = numpy.array(qoi_dists, distributions.Dist)
qoi_dists = qoi_dists.reshape(shape)
if not shape:
qoi_dists = qoi_dists.item() # depends on [control=['if'], data=[]]
return qoi_dists |
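A hedged variation on the doctest above, assuming the same chaospy version: the returned array keeps the shape of poly, with one sample-based distribution per polynomial.
import chaospy
dist = chaospy.Normal(0, 1)
x = chaospy.variable(1)
poly = chaospy.Poly([x, x**2])
qoi = chaospy.QoI_Dist(poly, dist, sample=2000)
print(qoi.shape)   # expected: (2,)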
async def numbered_page(self):
"""lets you type a page number to go to"""
to_delete = []
to_delete.append(await self.bot.send_message(self.message.channel, 'What page do you want to go to?'))
msg = await self.bot.wait_for_message(author=self.author, channel=self.message.channel,
check=lambda m: m.content.isdigit(), timeout=30.0)
if msg is not None:
page = int(msg.content)
to_delete.append(msg)
if page != 0 and page <= self.maximum_pages:
await self.show_page(page)
else:
to_delete.append(await self.bot.say('Invalid page given. (%s/%s)' % (page, self.maximum_pages)))
await asyncio.sleep(5)
else:
to_delete.append(await self.bot.send_message(self.message.channel, 'Took too long.'))
await asyncio.sleep(5)
try:
await self.bot.delete_messages(to_delete)
except Exception:
pass | <ast.AsyncFunctionDef object at 0x7da1b2818610> | keyword[async] keyword[def] identifier[numbered_page] ( identifier[self] ):
literal[string]
identifier[to_delete] =[]
identifier[to_delete] . identifier[append] ( keyword[await] identifier[self] . identifier[bot] . identifier[send_message] ( identifier[self] . identifier[message] . identifier[channel] , literal[string] ))
identifier[msg] = keyword[await] identifier[self] . identifier[bot] . identifier[wait_for_message] ( identifier[author] = identifier[self] . identifier[author] , identifier[channel] = identifier[self] . identifier[message] . identifier[channel] ,
identifier[check] = keyword[lambda] identifier[m] : identifier[m] . identifier[content] . identifier[isdigit] (), identifier[timeout] = literal[int] )
keyword[if] identifier[msg] keyword[is] keyword[not] keyword[None] :
identifier[page] = identifier[int] ( identifier[msg] . identifier[content] )
identifier[to_delete] . identifier[append] ( identifier[msg] )
keyword[if] identifier[page] != literal[int] keyword[and] identifier[page] <= identifier[self] . identifier[maximum_pages] :
keyword[await] identifier[self] . identifier[show_page] ( identifier[page] )
keyword[else] :
identifier[to_delete] . identifier[append] ( keyword[await] identifier[self] . identifier[bot] . identifier[say] ( literal[string] %( identifier[page] , identifier[self] . identifier[maximum_pages] )))
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
keyword[else] :
identifier[to_delete] . identifier[append] ( keyword[await] identifier[self] . identifier[bot] . identifier[send_message] ( identifier[self] . identifier[message] . identifier[channel] , literal[string] ))
keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
keyword[try] :
keyword[await] identifier[self] . identifier[bot] . identifier[delete_messages] ( identifier[to_delete] )
keyword[except] identifier[Exception] :
keyword[pass] | async def numbered_page(self):
"""lets you type a page number to go to"""
to_delete = []
to_delete.append(await self.bot.send_message(self.message.channel, 'What page do you want to go to?'))
msg = await self.bot.wait_for_message(author=self.author, channel=self.message.channel, check=lambda m: m.content.isdigit(), timeout=30.0)
if msg is not None:
page = int(msg.content)
to_delete.append(msg)
if page != 0 and page <= self.maximum_pages:
await self.show_page(page) # depends on [control=['if'], data=[]]
else:
to_delete.append(await self.bot.say('Invalid page given. (%s/%s)' % (page, self.maximum_pages)))
await asyncio.sleep(5) # depends on [control=['if'], data=['msg']]
else:
to_delete.append(await self.bot.send_message(self.message.channel, 'Took too long.'))
await asyncio.sleep(5)
try:
await self.bot.delete_messages(to_delete) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] |
def _write_config(self, memory):
"""Write the configuration for this gate to memory."""
memory.seek(0)
memory.write(struct.pack("<5I",
# sim_length
self._simulator.length,
# input_a_key
self._inputs["a"].routing_key
if self._inputs["a"] is not None
else 0xFFFFFFFF,
# input_b_key
self._inputs["b"].routing_key
if self._inputs["b"] is not None
else 0xFFFFFFFF,
# output_key
self.output.routing_key,
# lut
self._lookup_table)) | def function[_write_config, parameter[self, memory]]:
constant[Write the configuration for this gate to memory.]
call[name[memory].seek, parameter[constant[0]]]
call[name[memory].write, parameter[call[name[struct].pack, parameter[constant[<5I], name[self]._simulator.length, <ast.IfExp object at 0x7da1b189b730>, <ast.IfExp object at 0x7da1b189b6d0>, name[self].output.routing_key, name[self]._lookup_table]]]] | keyword[def] identifier[_write_config] ( identifier[self] , identifier[memory] ):
literal[string]
identifier[memory] . identifier[seek] ( literal[int] )
identifier[memory] . identifier[write] ( identifier[struct] . identifier[pack] ( literal[string] ,
identifier[self] . identifier[_simulator] . identifier[length] ,
identifier[self] . identifier[_inputs] [ literal[string] ]. identifier[routing_key]
keyword[if] identifier[self] . identifier[_inputs] [ literal[string] ] keyword[is] keyword[not] keyword[None]
keyword[else] literal[int] ,
identifier[self] . identifier[_inputs] [ literal[string] ]. identifier[routing_key]
keyword[if] identifier[self] . identifier[_inputs] [ literal[string] ] keyword[is] keyword[not] keyword[None]
keyword[else] literal[int] ,
identifier[self] . identifier[output] . identifier[routing_key] ,
identifier[self] . identifier[_lookup_table] )) | def _write_config(self, memory):
"""Write the configuration for this gate to memory."""
memory.seek(0)
# sim_length
# input_a_key
# input_b_key
# output_key
# lut
memory.write(struct.pack('<5I', self._simulator.length, self._inputs['a'].routing_key if self._inputs['a'] is not None else 4294967295, self._inputs['b'].routing_key if self._inputs['b'] is not None else 4294967295, self.output.routing_key, self._lookup_table)) |
def region_code_for_number(numobj):
"""Returns the region where a phone number is from.
This could be used for geocoding at the region level. Only guarantees
correct results for valid, full numbers (not short-codes, or invalid
numbers).
Arguments:
numobj -- The phone number object whose origin we want to know
Returns the region where the phone number is from, or None if no region
matches this calling code.
"""
country_code = numobj.country_code
regions = COUNTRY_CODE_TO_REGION_CODE.get(country_code, None)
if regions is None:
return None
if len(regions) == 1:
return regions[0]
else:
return _region_code_for_number_from_list(numobj, regions) | def function[region_code_for_number, parameter[numobj]]:
constant[Returns the region where a phone number is from.
This could be used for geocoding at the region level. Only guarantees
correct results for valid, full numbers (not short-codes, or invalid
numbers).
Arguments:
numobj -- The phone number object whose origin we want to know
Returns the region where the phone number is from, or None if no region
matches this calling code.
]
variable[country_code] assign[=] name[numobj].country_code
variable[regions] assign[=] call[name[COUNTRY_CODE_TO_REGION_CODE].get, parameter[name[country_code], constant[None]]]
if compare[name[regions] is constant[None]] begin[:]
return[constant[None]]
if compare[call[name[len], parameter[name[regions]]] equal[==] constant[1]] begin[:]
return[call[name[regions]][constant[0]]] | keyword[def] identifier[region_code_for_number] ( identifier[numobj] ):
literal[string]
identifier[country_code] = identifier[numobj] . identifier[country_code]
identifier[regions] = identifier[COUNTRY_CODE_TO_REGION_CODE] . identifier[get] ( identifier[country_code] , keyword[None] )
keyword[if] identifier[regions] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[regions] )== literal[int] :
keyword[return] identifier[regions] [ literal[int] ]
keyword[else] :
keyword[return] identifier[_region_code_for_number_from_list] ( identifier[numobj] , identifier[regions] ) | def region_code_for_number(numobj):
"""Returns the region where a phone number is from.
This could be used for geocoding at the region level. Only guarantees
correct results for valid, full numbers (not short-codes, or invalid
numbers).
Arguments:
numobj -- The phone number object whose origin we want to know
Returns the region where the phone number is from, or None if no region
matches this calling code.
"""
country_code = numobj.country_code
regions = COUNTRY_CODE_TO_REGION_CODE.get(country_code, None)
if regions is None:
return None # depends on [control=['if'], data=[]]
if len(regions) == 1:
return regions[0] # depends on [control=['if'], data=[]]
else:
return _region_code_for_number_from_list(numobj, regions) |
def serialize(self, value, **kwargs):
"""Serialize every item of the list."""
return [self.item_type.serialize(val, **kwargs) for val in value] | def function[serialize, parameter[self, value]]:
constant[Serialize every item of the list.]
return[<ast.ListComp object at 0x7da1b1ad3970>] | keyword[def] identifier[serialize] ( identifier[self] , identifier[value] ,** identifier[kwargs] ):
literal[string]
keyword[return] [ identifier[self] . identifier[item_type] . identifier[serialize] ( identifier[val] ,** identifier[kwargs] ) keyword[for] identifier[val] keyword[in] identifier[value] ] | def serialize(self, value, **kwargs):
"""Serialize every item of the list."""
return [self.item_type.serialize(val, **kwargs) for val in value] |
def match(self, other_version):
"""Returns True if other_version matches.
Args:
      other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (major in [self.major, "*"] and minor in [self.minor, "*"]
and patch in [self.patch, "*"]) | def function[match, parameter[self, other_version]]:
constant[Returns True if other_version matches.
Args:
      other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
]
<ast.Tuple object at 0x7da1b1e8e5c0> assign[=] call[name[_str_to_version], parameter[name[other_version]]]
return[<ast.BoolOp object at 0x7da1b2060790>] | keyword[def] identifier[match] ( identifier[self] , identifier[other_version] ):
literal[string]
identifier[major] , identifier[minor] , identifier[patch] = identifier[_str_to_version] ( identifier[other_version] , identifier[allow_wildcard] = keyword[True] )
keyword[return] ( identifier[major] keyword[in] [ identifier[self] . identifier[major] , literal[string] ] keyword[and] identifier[minor] keyword[in] [ identifier[self] . identifier[minor] , literal[string] ]
keyword[and] identifier[patch] keyword[in] [ identifier[self] . identifier[patch] , literal[string] ]) | def match(self, other_version):
"""Returns True if other_version matches.
Args:
      other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
"""
(major, minor, patch) = _str_to_version(other_version, allow_wildcard=True)
return major in [self.major, '*'] and minor in [self.minor, '*'] and (patch in [self.patch, '*']) |
def cli_run_viz(source=None, outputpath="", theme="", verbose=False):
"""
    This application is a wrapper around the main ontospy-viz script. It generates docs for all models in the local library, using the Complex-html template.
    @todo allow passing a custom folder ..
> python -m ontospy.viz.scripts.export_all -o ~/Desktop/test/ --theme random
"""
if outputpath:
if not (os.path.exists(outputpath)) or not (os.path.isdir(outputpath)):
click.secho(
"WARNING: the -o option must include a valid directory path.",
fg="red")
sys.exit(0)
else:
from os.path import expanduser
home = expanduser("~")
outputpath = os.path.join(home, "ontospy-viz-multi")
if source:
source_folder = source[0]
if not os.path.isdir(source_folder):
click.secho(
"WARNING: '%s' is not a valid directory path." % source_folder,
fg="red")
sys.exit(0)
files_list = [
f for f in os.listdir(source_folder)
if os.path.isfile(os.path.join(source_folder, f))
]
click.secho(
"Exporting the directory: '%s'" % source_folder, fg="green")
click.secho("----------", fg="green")
else:
click.secho(
"Exporting the local library: '%s'" % get_home_location(),
fg="green")
click.secho("----------", fg="green")
files_list = get_localontologies()
source_folder = get_home_location()
report_pages = []
for onto_name in files_list:
full_uri = os.path.join(source_folder, onto_name)
if theme:
if theme == "random":
_theme = random_theme()
else:
_theme = theme
else:
_theme = BOOTSWATCH_THEME_DEFAULT
click.secho("Onto: <%s> Theme: '%s'" % (onto_name, _theme), fg="green")
printDebug("Loading graph...", dim=True)
g = Ontospy(os.path.join(source_folder, onto_name), verbose=verbose)
if g.sources:
            # if the Ontospy graph has no valid 'sources', the file passed was not valid RDF
printDebug("Building visualization...", dim=True)
onto_name_safe = slugify(unicode(onto_name))
onto_outputpath = os.path.join(outputpath, onto_name_safe)
# note: single static files output path
static_outputpath = os.path.join(outputpath, "static")
# v = KompleteViz(g, theme=_theme)
v = KompleteVizMultiModel(
g,
theme=_theme,
static_url="../static/",
output_path_static=static_outputpath)
try:
# note: onto_outputpath is wiped out each time as part of the build
url = v.build(onto_outputpath)
report_pages.append(
"<a href='%s/index.html' target='_blank'>%s</a> ('%s' theme)<br />"
% (onto_name_safe, onto_name, _theme))
except:
e = sys.exc_info()[0]
printDebug("Error: " + str(e), "red")
continue
# generate a report page
report_path = os.path.join(outputpath, "index.html")
html = """
<html>
<head>
<style media="screen">
a {font-size: 20px; padding: 15px; text-transform: capitalize; text-decoration: none;}
a:hover {text-decoration: underline;}
</style>
</head>
<body>
<h1>Ontospy-generated documentation:</h1>
%s
</body>
</html>
"""
with open(report_path, "w") as text_file:
text_file.write(html % ("".join([x for x in report_pages])))
# open report
webbrowser.open("file:///" + report_path)
raise SystemExit(1) | def function[cli_run_viz, parameter[source, outputpath, theme, verbose]]:
constant[
    This application is a wrapper around the main ontospy-viz script. It generates docs for all models in the local library, using the Complex-html template.
    @todo allow passing a custom folder ..
> python -m ontospy.viz.scripts.export_all -o ~/Desktop/test/ --theme random
]
if name[outputpath] begin[:]
if <ast.BoolOp object at 0x7da1b11abe20> begin[:]
call[name[click].secho, parameter[constant[WARNING: the -o option must include a valid directory path.]]]
call[name[sys].exit, parameter[constant[0]]]
if name[source] begin[:]
variable[source_folder] assign[=] call[name[source]][constant[0]]
if <ast.UnaryOp object at 0x7da1b11aa440> begin[:]
call[name[click].secho, parameter[binary_operation[constant[WARNING: '%s' is not a valid directory path.] <ast.Mod object at 0x7da2590d6920> name[source_folder]]]]
call[name[sys].exit, parameter[constant[0]]]
variable[files_list] assign[=] <ast.ListComp object at 0x7da1b11a9660>
call[name[click].secho, parameter[binary_operation[constant[Exporting the directory: '%s'] <ast.Mod object at 0x7da2590d6920> name[source_folder]]]]
call[name[click].secho, parameter[constant[----------]]]
variable[report_pages] assign[=] list[[]]
for taget[name[onto_name]] in starred[name[files_list]] begin[:]
variable[full_uri] assign[=] call[name[os].path.join, parameter[name[source_folder], name[onto_name]]]
if name[theme] begin[:]
if compare[name[theme] equal[==] constant[random]] begin[:]
variable[_theme] assign[=] call[name[random_theme], parameter[]]
call[name[click].secho, parameter[binary_operation[constant[Onto: <%s> Theme: '%s'] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b11aa4d0>, <ast.Name object at 0x7da1b11a85b0>]]]]]
call[name[printDebug], parameter[constant[Loading graph...]]]
variable[g] assign[=] call[name[Ontospy], parameter[call[name[os].path.join, parameter[name[source_folder], name[onto_name]]]]]
if name[g].sources begin[:]
call[name[printDebug], parameter[constant[Building visualization...]]]
variable[onto_name_safe] assign[=] call[name[slugify], parameter[call[name[unicode], parameter[name[onto_name]]]]]
variable[onto_outputpath] assign[=] call[name[os].path.join, parameter[name[outputpath], name[onto_name_safe]]]
variable[static_outputpath] assign[=] call[name[os].path.join, parameter[name[outputpath], constant[static]]]
variable[v] assign[=] call[name[KompleteVizMultiModel], parameter[name[g]]]
<ast.Try object at 0x7da1b12b4bb0>
variable[report_path] assign[=] call[name[os].path.join, parameter[name[outputpath], constant[index.html]]]
variable[html] assign[=] constant[
<html>
<head>
<style media="screen">
a {font-size: 20px; padding: 15px; text-transform: capitalize; text-decoration: none;}
a:hover {text-decoration: underline;}
</style>
</head>
<body>
<h1>Ontospy-generated documentation:</h1>
%s
</body>
</html>
]
with call[name[open], parameter[name[report_path], constant[w]]] begin[:]
call[name[text_file].write, parameter[binary_operation[name[html] <ast.Mod object at 0x7da2590d6920> call[constant[].join, parameter[<ast.ListComp object at 0x7da18ede66e0>]]]]]
call[name[webbrowser].open, parameter[binary_operation[constant[file:///] + name[report_path]]]]
<ast.Raise object at 0x7da18ede71f0> | keyword[def] identifier[cli_run_viz] ( identifier[source] = keyword[None] , identifier[outputpath] = literal[string] , identifier[theme] = literal[string] , identifier[verbose] = keyword[False] ):
literal[string]
keyword[if] identifier[outputpath] :
keyword[if] keyword[not] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[outputpath] )) keyword[or] keyword[not] ( identifier[os] . identifier[path] . identifier[isdir] ( identifier[outputpath] )):
identifier[click] . identifier[secho] (
literal[string] ,
identifier[fg] = literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
keyword[from] identifier[os] . identifier[path] keyword[import] identifier[expanduser]
identifier[home] = identifier[expanduser] ( literal[string] )
identifier[outputpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[home] , literal[string] )
keyword[if] identifier[source] :
identifier[source_folder] = identifier[source] [ literal[int] ]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[source_folder] ):
identifier[click] . identifier[secho] (
literal[string] % identifier[source_folder] ,
identifier[fg] = literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[files_list] =[
identifier[f] keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[source_folder] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[source_folder] , identifier[f] ))
]
identifier[click] . identifier[secho] (
literal[string] % identifier[source_folder] , identifier[fg] = literal[string] )
identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] )
keyword[else] :
identifier[click] . identifier[secho] (
literal[string] % identifier[get_home_location] (),
identifier[fg] = literal[string] )
identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] )
identifier[files_list] = identifier[get_localontologies] ()
identifier[source_folder] = identifier[get_home_location] ()
identifier[report_pages] =[]
keyword[for] identifier[onto_name] keyword[in] identifier[files_list] :
identifier[full_uri] = identifier[os] . identifier[path] . identifier[join] ( identifier[source_folder] , identifier[onto_name] )
keyword[if] identifier[theme] :
keyword[if] identifier[theme] == literal[string] :
identifier[_theme] = identifier[random_theme] ()
keyword[else] :
identifier[_theme] = identifier[theme]
keyword[else] :
identifier[_theme] = identifier[BOOTSWATCH_THEME_DEFAULT]
identifier[click] . identifier[secho] ( literal[string] %( identifier[onto_name] , identifier[_theme] ), identifier[fg] = literal[string] )
identifier[printDebug] ( literal[string] , identifier[dim] = keyword[True] )
identifier[g] = identifier[Ontospy] ( identifier[os] . identifier[path] . identifier[join] ( identifier[source_folder] , identifier[onto_name] ), identifier[verbose] = identifier[verbose] )
keyword[if] identifier[g] . identifier[sources] :
identifier[printDebug] ( literal[string] , identifier[dim] = keyword[True] )
identifier[onto_name_safe] = identifier[slugify] ( identifier[unicode] ( identifier[onto_name] ))
identifier[onto_outputpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[outputpath] , identifier[onto_name_safe] )
identifier[static_outputpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[outputpath] , literal[string] )
identifier[v] = identifier[KompleteVizMultiModel] (
identifier[g] ,
identifier[theme] = identifier[_theme] ,
identifier[static_url] = literal[string] ,
identifier[output_path_static] = identifier[static_outputpath] )
keyword[try] :
identifier[url] = identifier[v] . identifier[build] ( identifier[onto_outputpath] )
identifier[report_pages] . identifier[append] (
literal[string]
%( identifier[onto_name_safe] , identifier[onto_name] , identifier[_theme] ))
keyword[except] :
identifier[e] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
identifier[printDebug] ( literal[string] + identifier[str] ( identifier[e] ), literal[string] )
keyword[continue]
identifier[report_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[outputpath] , literal[string] )
identifier[html] = literal[string]
keyword[with] identifier[open] ( identifier[report_path] , literal[string] ) keyword[as] identifier[text_file] :
identifier[text_file] . identifier[write] ( identifier[html] %( literal[string] . identifier[join] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[report_pages] ])))
identifier[webbrowser] . identifier[open] ( literal[string] + identifier[report_path] )
keyword[raise] identifier[SystemExit] ( literal[int] ) | def cli_run_viz(source=None, outputpath='', theme='', verbose=False):
"""
    This application is a wrapper around the main ontospy-viz script. It generates docs for all models in the local library, using the Complex-html template.
    @todo allow passing a custom folder ..
> python -m ontospy.viz.scripts.export_all -o ~/Desktop/test/ --theme random
"""
if outputpath:
if not os.path.exists(outputpath) or not os.path.isdir(outputpath):
click.secho('WARNING: the -o option must include a valid directory path.', fg='red')
sys.exit(0) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
from os.path import expanduser
home = expanduser('~')
outputpath = os.path.join(home, 'ontospy-viz-multi')
if source:
source_folder = source[0]
if not os.path.isdir(source_folder):
click.secho("WARNING: '%s' is not a valid directory path." % source_folder, fg='red')
sys.exit(0) # depends on [control=['if'], data=[]]
files_list = [f for f in os.listdir(source_folder) if os.path.isfile(os.path.join(source_folder, f))]
click.secho("Exporting the directory: '%s'" % source_folder, fg='green')
click.secho('----------', fg='green') # depends on [control=['if'], data=[]]
else:
click.secho("Exporting the local library: '%s'" % get_home_location(), fg='green')
click.secho('----------', fg='green')
files_list = get_localontologies()
source_folder = get_home_location()
report_pages = []
for onto_name in files_list:
full_uri = os.path.join(source_folder, onto_name)
if theme:
if theme == 'random':
_theme = random_theme() # depends on [control=['if'], data=[]]
else:
_theme = theme # depends on [control=['if'], data=[]]
else:
_theme = BOOTSWATCH_THEME_DEFAULT
click.secho("Onto: <%s> Theme: '%s'" % (onto_name, _theme), fg='green')
printDebug('Loading graph...', dim=True)
g = Ontospy(os.path.join(source_folder, onto_name), verbose=verbose)
if g.sources:
        # if the Ontospy graph has no valid 'sources', the file passed was not valid RDF
printDebug('Building visualization...', dim=True)
onto_name_safe = slugify(unicode(onto_name))
onto_outputpath = os.path.join(outputpath, onto_name_safe)
# note: single static files output path
static_outputpath = os.path.join(outputpath, 'static')
# v = KompleteViz(g, theme=_theme)
v = KompleteVizMultiModel(g, theme=_theme, static_url='../static/', output_path_static=static_outputpath)
try:
# note: onto_outputpath is wiped out each time as part of the build
url = v.build(onto_outputpath)
report_pages.append("<a href='%s/index.html' target='_blank'>%s</a> ('%s' theme)<br />" % (onto_name_safe, onto_name, _theme)) # depends on [control=['try'], data=[]]
except:
e = sys.exc_info()[0]
printDebug('Error: ' + str(e), 'red')
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['onto_name']]
# generate a report page
report_path = os.path.join(outputpath, 'index.html')
html = '\n<html>\n<head>\n <style media="screen">\n a {font-size: 20px; padding: 15px; text-transform: capitalize; text-decoration: none;}\n a:hover {text-decoration: underline;}\n </style>\n</head>\n<body>\n<h1>Ontospy-generated documentation:</h1>\n%s\n</body>\n</html>\n '
with open(report_path, 'w') as text_file:
text_file.write(html % ''.join([x for x in report_pages])) # depends on [control=['with'], data=['text_file']]
# open report
webbrowser.open('file:///' + report_path)
raise SystemExit(1) |
def cmd_jshell(ip, port, verbose):
"""Control a web browser through Websockets.
Bind a port (default: 3333) and listen for HTTP connections.
    On connection, send JavaScript code that opens a WebSocket that
can be used to send commands to the connected browser.
You can write the commands directly in the shell, or use plugins, that
are simply external JavaScript files.
Using habu.jshell you can completely control a web browser.
Reference: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API
Example:
\b
$ habu.jshell
>> Listening on 192.168.0.10:3333. Waiting for a victim connection.
>> HTTP Request received from 192.168.0.15. Sending hookjs
>> Connection from 192.168.0.15
$ _sessions
0 * 192.168.0.15:33432 Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0
$ _info
{
"user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0",
"location": "http://192.168.0.10:3333/",
"java-enabled": false,
"platform": "Linux x86_64",
"app-code-name": "Mozilla",
"app-name": "Netscape",
"app-version": "5.0 (X11)",
"cookie-enabled": true,
"language": "es-AR",
"online": true
}
$ document.location
http://192.168.0.10:3333/
"""
global hook_js
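    # Bake the listener ip/port into the JavaScript hook before it is served to connecting browsers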
hook_js = hook_js.format(ip=ip, port=port)
print('>>> Listening on {}:{}. Waiting for a victim connection.'.format(ip, port))
eventloop = asyncio.get_event_loop()
eventloop.run_until_complete(websockets.serve(handler, ip, port, create_protocol=MyWebSocketServerProtocol))
thread = threading.Thread(target=eventloop.run_forever)
thread.start()
completer = WordCompleter(completer_list + list(runner.internal_commands) + list(runner.external_commands))
history = InMemoryHistory()
while True:
if not thread.is_alive():
break
cmd = prompt('$ ', patch_stdout=True, completer=completer, history=history, lexer=PygmentsLexer(JavascriptLexer))
if cmd:
if cmd == '_help':
runner.cmd_help()
elif runner.sessions:
queue.put_nowait(cmd)
else:
print('>>> No active session!') | def function[cmd_jshell, parameter[ip, port, verbose]]:
constant[Control a web browser through Websockets.
Bind a port (default: 3333) and listen for HTTP connections.
    On connection, send JavaScript code that opens a WebSocket that
can be used to send commands to the connected browser.
You can write the commands directly in the shell, or use plugins, that
are simply external JavaScript files.
Using habu.jshell you can completely control a web browser.
Reference: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API
Example:
$ habu.jshell
>> Listening on 192.168.0.10:3333. Waiting for a victim connection.
>> HTTP Request received from 192.168.0.15. Sending hookjs
>> Connection from 192.168.0.15
$ _sessions
0 * 192.168.0.15:33432 Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0
$ _info
{
"user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0",
"location": "http://192.168.0.10:3333/",
"java-enabled": false,
"platform": "Linux x86_64",
"app-code-name": "Mozilla",
"app-name": "Netscape",
"app-version": "5.0 (X11)",
"cookie-enabled": true,
"language": "es-AR",
"online": true
}
$ document.location
http://192.168.0.10:3333/
]
<ast.Global object at 0x7da2054a4460>
variable[hook_js] assign[=] call[name[hook_js].format, parameter[]]
call[name[print], parameter[call[constant[>>> Listening on {}:{}. Waiting for a victim connection.].format, parameter[name[ip], name[port]]]]]
variable[eventloop] assign[=] call[name[asyncio].get_event_loop, parameter[]]
call[name[eventloop].run_until_complete, parameter[call[name[websockets].serve, parameter[name[handler], name[ip], name[port]]]]]
variable[thread] assign[=] call[name[threading].Thread, parameter[]]
call[name[thread].start, parameter[]]
variable[completer] assign[=] call[name[WordCompleter], parameter[binary_operation[binary_operation[name[completer_list] + call[name[list], parameter[name[runner].internal_commands]]] + call[name[list], parameter[name[runner].external_commands]]]]]
variable[history] assign[=] call[name[InMemoryHistory], parameter[]]
while constant[True] begin[:]
if <ast.UnaryOp object at 0x7da2044c3430> begin[:]
break
variable[cmd] assign[=] call[name[prompt], parameter[constant[$ ]]]
if name[cmd] begin[:]
if compare[name[cmd] equal[==] constant[_help]] begin[:]
call[name[runner].cmd_help, parameter[]] | keyword[def] identifier[cmd_jshell] ( identifier[ip] , identifier[port] , identifier[verbose] ):
literal[string]
keyword[global] identifier[hook_js]
identifier[hook_js] = identifier[hook_js] . identifier[format] ( identifier[ip] = identifier[ip] , identifier[port] = identifier[port] )
identifier[print] ( literal[string] . identifier[format] ( identifier[ip] , identifier[port] ))
identifier[eventloop] = identifier[asyncio] . identifier[get_event_loop] ()
identifier[eventloop] . identifier[run_until_complete] ( identifier[websockets] . identifier[serve] ( identifier[handler] , identifier[ip] , identifier[port] , identifier[create_protocol] = identifier[MyWebSocketServerProtocol] ))
identifier[thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[eventloop] . identifier[run_forever] )
identifier[thread] . identifier[start] ()
identifier[completer] = identifier[WordCompleter] ( identifier[completer_list] + identifier[list] ( identifier[runner] . identifier[internal_commands] )+ identifier[list] ( identifier[runner] . identifier[external_commands] ))
identifier[history] = identifier[InMemoryHistory] ()
keyword[while] keyword[True] :
keyword[if] keyword[not] identifier[thread] . identifier[is_alive] ():
keyword[break]
identifier[cmd] = identifier[prompt] ( literal[string] , identifier[patch_stdout] = keyword[True] , identifier[completer] = identifier[completer] , identifier[history] = identifier[history] , identifier[lexer] = identifier[PygmentsLexer] ( identifier[JavascriptLexer] ))
keyword[if] identifier[cmd] :
keyword[if] identifier[cmd] == literal[string] :
identifier[runner] . identifier[cmd_help] ()
keyword[elif] identifier[runner] . identifier[sessions] :
identifier[queue] . identifier[put_nowait] ( identifier[cmd] )
keyword[else] :
identifier[print] ( literal[string] ) | def cmd_jshell(ip, port, verbose):
"""Control a web browser through Websockets.
Bind a port (default: 3333) and listen for HTTP connections.
    On connection, send JavaScript code that opens a WebSocket that
can be used to send commands to the connected browser.
You can write the commands directly in the shell, or use plugins, that
are simply external JavaScript files.
Using habu.jshell you can completely control a web browser.
Reference: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API
Example:
\x08
$ habu.jshell
>> Listening on 192.168.0.10:3333. Waiting for a victim connection.
>> HTTP Request received from 192.168.0.15. Sending hookjs
>> Connection from 192.168.0.15
$ _sessions
0 * 192.168.0.15:33432 Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0
$ _info
{
"user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0",
"location": "http://192.168.0.10:3333/",
"java-enabled": false,
"platform": "Linux x86_64",
"app-code-name": "Mozilla",
"app-name": "Netscape",
"app-version": "5.0 (X11)",
"cookie-enabled": true,
"language": "es-AR",
"online": true
}
$ document.location
http://192.168.0.10:3333/
"""
global hook_js
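    # Bake the listener ip/port into the JavaScript hook before it is served to connecting browsers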
hook_js = hook_js.format(ip=ip, port=port)
print('>>> Listening on {}:{}. Waiting for a victim connection.'.format(ip, port))
eventloop = asyncio.get_event_loop()
eventloop.run_until_complete(websockets.serve(handler, ip, port, create_protocol=MyWebSocketServerProtocol))
thread = threading.Thread(target=eventloop.run_forever)
thread.start()
completer = WordCompleter(completer_list + list(runner.internal_commands) + list(runner.external_commands))
history = InMemoryHistory()
while True:
if not thread.is_alive():
break # depends on [control=['if'], data=[]]
cmd = prompt('$ ', patch_stdout=True, completer=completer, history=history, lexer=PygmentsLexer(JavascriptLexer))
if cmd:
if cmd == '_help':
runner.cmd_help() # depends on [control=['if'], data=[]]
elif runner.sessions:
queue.put_nowait(cmd) # depends on [control=['if'], data=[]]
else:
print('>>> No active session!') # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def _add_constraint(self, constraint):
"""
Adds constraint to the ProbModelXML.
"""
constraint_data = self.data['probnet']['AdditionalConstraints'][constraint]
constraint_element = etree.SubElement(
self.additional_constraints, 'Constraint', attrib={'name': constraint})
for argument in sorted(constraint_data):
name = argument
value = constraint_data[name]
etree.SubElement(constraint_element, 'Argument', attrib={'name': name, 'value': value}) | def function[_add_constraint, parameter[self, constraint]]:
constant[
Adds constraint to the ProbModelXML.
]
variable[constraint_data] assign[=] call[call[call[name[self].data][constant[probnet]]][constant[AdditionalConstraints]]][name[constraint]]
variable[constraint_element] assign[=] call[name[etree].SubElement, parameter[name[self].additional_constraints, constant[Constraint]]]
for taget[name[argument]] in starred[call[name[sorted], parameter[name[constraint_data]]]] begin[:]
variable[name] assign[=] name[argument]
variable[value] assign[=] call[name[constraint_data]][name[name]]
call[name[etree].SubElement, parameter[name[constraint_element], constant[Argument]]] | keyword[def] identifier[_add_constraint] ( identifier[self] , identifier[constraint] ):
literal[string]
identifier[constraint_data] = identifier[self] . identifier[data] [ literal[string] ][ literal[string] ][ identifier[constraint] ]
identifier[constraint_element] = identifier[etree] . identifier[SubElement] (
identifier[self] . identifier[additional_constraints] , literal[string] , identifier[attrib] ={ literal[string] : identifier[constraint] })
keyword[for] identifier[argument] keyword[in] identifier[sorted] ( identifier[constraint_data] ):
identifier[name] = identifier[argument]
identifier[value] = identifier[constraint_data] [ identifier[name] ]
identifier[etree] . identifier[SubElement] ( identifier[constraint_element] , literal[string] , identifier[attrib] ={ literal[string] : identifier[name] , literal[string] : identifier[value] }) | def _add_constraint(self, constraint):
"""
Adds constraint to the ProbModelXML.
"""
constraint_data = self.data['probnet']['AdditionalConstraints'][constraint]
constraint_element = etree.SubElement(self.additional_constraints, 'Constraint', attrib={'name': constraint})
for argument in sorted(constraint_data):
name = argument
value = constraint_data[name]
etree.SubElement(constraint_element, 'Argument', attrib={'name': name, 'value': value}) # depends on [control=['for'], data=['argument']] |
def paintEvent(self, event):
"""
        Paints the messages and the visible area on the panel.
        :param event: paint event info
"""
if self.isVisible():
# fill background
self._background_brush = QtGui.QBrush(self.editor.background)
painter = QtGui.QPainter(self)
painter.fillRect(event.rect(), self._background_brush)
self._draw_messages(painter)
self._draw_visible_area(painter) | def function[paintEvent, parameter[self, event]]:
constant[
        Paints the messages and the visible area on the panel.
        :param event: paint event info
]
if call[name[self].isVisible, parameter[]] begin[:]
name[self]._background_brush assign[=] call[name[QtGui].QBrush, parameter[name[self].editor.background]]
variable[painter] assign[=] call[name[QtGui].QPainter, parameter[name[self]]]
call[name[painter].fillRect, parameter[call[name[event].rect, parameter[]], name[self]._background_brush]]
call[name[self]._draw_messages, parameter[name[painter]]]
call[name[self]._draw_visible_area, parameter[name[painter]]] | keyword[def] identifier[paintEvent] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[self] . identifier[isVisible] ():
identifier[self] . identifier[_background_brush] = identifier[QtGui] . identifier[QBrush] ( identifier[self] . identifier[editor] . identifier[background] )
identifier[painter] = identifier[QtGui] . identifier[QPainter] ( identifier[self] )
identifier[painter] . identifier[fillRect] ( identifier[event] . identifier[rect] (), identifier[self] . identifier[_background_brush] )
identifier[self] . identifier[_draw_messages] ( identifier[painter] )
identifier[self] . identifier[_draw_visible_area] ( identifier[painter] ) | def paintEvent(self, event):
"""
        Paints the messages and the visible area on the panel.
        :param event: paint event info
"""
if self.isVisible():
# fill background
self._background_brush = QtGui.QBrush(self.editor.background)
painter = QtGui.QPainter(self)
painter.fillRect(event.rect(), self._background_brush)
self._draw_messages(painter)
self._draw_visible_area(painter) # depends on [control=['if'], data=[]] |
def cmd_delete_doc(docid):
"""
Arguments: <document_id>
Delete a document.
Possible JSON replies:
--
{ "status": "ok", "docid": "xxxx" }
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
"""
dsearch = get_docsearch()
doc = dsearch.get(docid)
if doc is None:
raise Exception(
"Document {} not found. Cannot delete it".format(
docid
)
)
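    # Remove the document from the search index first, then destroy its files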
index_updater = dsearch.get_index_updater(optimize=False)
index_updater.del_doc(doc)
index_updater.commit()
doc.destroy()
verbose("Document {} deleted".format(docid))
reply({
'docid': docid
}) | def function[cmd_delete_doc, parameter[docid]]:
constant[
Arguments: <document_id>
Delete a document.
Possible JSON replies:
--
{ "status": "ok", "docid": "xxxx" }
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
]
variable[dsearch] assign[=] call[name[get_docsearch], parameter[]]
variable[doc] assign[=] call[name[dsearch].get, parameter[name[docid]]]
if compare[name[doc] is constant[None]] begin[:]
<ast.Raise object at 0x7da18bc730d0>
variable[index_updater] assign[=] call[name[dsearch].get_index_updater, parameter[]]
call[name[index_updater].del_doc, parameter[name[doc]]]
call[name[index_updater].commit, parameter[]]
call[name[doc].destroy, parameter[]]
call[name[verbose], parameter[call[constant[Document {} deleted].format, parameter[name[docid]]]]]
call[name[reply], parameter[dictionary[[<ast.Constant object at 0x7da18f09fb80>], [<ast.Name object at 0x7da204566200>]]]] | keyword[def] identifier[cmd_delete_doc] ( identifier[docid] ):
literal[string]
identifier[dsearch] = identifier[get_docsearch] ()
identifier[doc] = identifier[dsearch] . identifier[get] ( identifier[docid] )
keyword[if] identifier[doc] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] (
literal[string] . identifier[format] (
identifier[docid]
)
)
identifier[index_updater] = identifier[dsearch] . identifier[get_index_updater] ( identifier[optimize] = keyword[False] )
identifier[index_updater] . identifier[del_doc] ( identifier[doc] )
identifier[index_updater] . identifier[commit] ()
identifier[doc] . identifier[destroy] ()
identifier[verbose] ( literal[string] . identifier[format] ( identifier[docid] ))
identifier[reply] ({
literal[string] : identifier[docid]
}) | def cmd_delete_doc(docid):
"""
Arguments: <document_id>
Delete a document.
Possible JSON replies:
--
{ "status": "ok", "docid": "xxxx" }
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
"""
dsearch = get_docsearch()
doc = dsearch.get(docid)
if doc is None:
raise Exception('Document {} not found. Cannot delete it'.format(docid)) # depends on [control=['if'], data=[]]
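    # Remove the document from the search index first, then destroy its files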
index_updater = dsearch.get_index_updater(optimize=False)
index_updater.del_doc(doc)
index_updater.commit()
doc.destroy()
verbose('Document {} deleted'.format(docid))
reply({'docid': docid}) |
def websafe_dither(self):
"""Return the two websafe colors nearest to this one.
Returns:
A tuple of two grapefruit.Color instances which are the two
      web safe colors closest to this one.
>>> c = Color.from_rgb(1.0, 0.45, 0.0)
>>> c1, c2 = c.websafe_dither()
>>> c1
Color(1.0, 0.4, 0.0, 1.0)
>>> c2
Color(1.0, 0.6, 0.0, 1.0)
"""
return (
Color(rgb_to_websafe(*self.__rgb), 'rgb', self.__a, self.__wref),
Color(rgb_to_websafe(alt=True, *self.__rgb), 'rgb', self.__a, self.__wref)) | def function[websafe_dither, parameter[self]]:
constant[Return the two websafe colors nearest to this one.
Returns:
A tuple of two grapefruit.Color instances which are the two
      web safe colors closest to this one.
>>> c = Color.from_rgb(1.0, 0.45, 0.0)
>>> c1, c2 = c.websafe_dither()
>>> c1
Color(1.0, 0.4, 0.0, 1.0)
>>> c2
Color(1.0, 0.6, 0.0, 1.0)
]
return[tuple[[<ast.Call object at 0x7da1b11065c0>, <ast.Call object at 0x7da1b11055d0>]]] | keyword[def] identifier[websafe_dither] ( identifier[self] ):
literal[string]
keyword[return] (
identifier[Color] ( identifier[rgb_to_websafe] (* identifier[self] . identifier[__rgb] ), literal[string] , identifier[self] . identifier[__a] , identifier[self] . identifier[__wref] ),
identifier[Color] ( identifier[rgb_to_websafe] ( identifier[alt] = keyword[True] ,* identifier[self] . identifier[__rgb] ), literal[string] , identifier[self] . identifier[__a] , identifier[self] . identifier[__wref] )) | def websafe_dither(self):
"""Return the two websafe colors nearest to this one.
Returns:
A tuple of two grapefruit.Color instances which are the two
      web safe colors closest to this one.
>>> c = Color.from_rgb(1.0, 0.45, 0.0)
>>> c1, c2 = c.websafe_dither()
>>> c1
Color(1.0, 0.4, 0.0, 1.0)
>>> c2
Color(1.0, 0.6, 0.0, 1.0)
"""
return (Color(rgb_to_websafe(*self.__rgb), 'rgb', self.__a, self.__wref), Color(rgb_to_websafe(*self.__rgb, alt=True), 'rgb', self.__a, self.__wref)) |
def gblocks(self,
new_path = None,
seq_type = 'nucl' or 'prot'):
"""Apply the gblocks filtering algorithm to the alignment.
See http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_documentation.html
        Need to rename all sequences, because it will complain about long names."""
# Temporary path #
if new_path is None: final = self.__class__(new_temp_path())
else: final = self.__class__(new_path)
# Mapping every sequence name with a random name #
orig_name_to_temp = {seq.description: 'name' + str(i) for i,seq in enumerate(self)}
temp_name_to_orig = {v: k for k, v in orig_name_to_temp.items()}
# Rename every sequence with a random name #
temp_fasta = self.rename_sequences(orig_name_to_temp)
# Options #
if seq_type == 'nucl': t_option = "-t=d"
if seq_type == 'prot': t_option = "-t=p"
# Run it #
result = sh.gblocks91(temp_fasta.path, t_option, '-p=n', "-b4=3", "-b3=20", "-b5=a", _ok_code=[0,1])
created_file = temp_fasta.path + '-gb'
assert os.path.exists(created_file)
# Check errors #
if "Execution terminated" in result.stdout: raise Exception("gblocks crashed again.")
# Back #
temp_fasta.rename_sequences(temp_name_to_orig, final)
# Return #
return final | def function[gblocks, parameter[self, new_path, seq_type]]:
constant[Apply the gblocks filtering algorithm to the alignment.
See http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_documentation.html
        Need to rename all sequences, because it will complain about long names.]
if compare[name[new_path] is constant[None]] begin[:]
variable[final] assign[=] call[name[self].__class__, parameter[call[name[new_temp_path], parameter[]]]]
variable[orig_name_to_temp] assign[=] <ast.DictComp object at 0x7da1b085eaa0>
variable[temp_name_to_orig] assign[=] <ast.DictComp object at 0x7da1b085fa90>
variable[temp_fasta] assign[=] call[name[self].rename_sequences, parameter[name[orig_name_to_temp]]]
if compare[name[seq_type] equal[==] constant[nucl]] begin[:]
variable[t_option] assign[=] constant[-t=d]
if compare[name[seq_type] equal[==] constant[prot]] begin[:]
variable[t_option] assign[=] constant[-t=p]
variable[result] assign[=] call[name[sh].gblocks91, parameter[name[temp_fasta].path, name[t_option], constant[-p=n], constant[-b4=3], constant[-b3=20], constant[-b5=a]]]
variable[created_file] assign[=] binary_operation[name[temp_fasta].path + constant[-gb]]
assert[call[name[os].path.exists, parameter[name[created_file]]]]
if compare[constant[Execution terminated] in name[result].stdout] begin[:]
<ast.Raise object at 0x7da1b0863130>
call[name[temp_fasta].rename_sequences, parameter[name[temp_name_to_orig], name[final]]]
return[name[final]] | keyword[def] identifier[gblocks] ( identifier[self] ,
identifier[new_path] = keyword[None] ,
identifier[seq_type] = literal[string] keyword[or] literal[string] ):
literal[string]
keyword[if] identifier[new_path] keyword[is] keyword[None] : identifier[final] = identifier[self] . identifier[__class__] ( identifier[new_temp_path] ())
keyword[else] : identifier[final] = identifier[self] . identifier[__class__] ( identifier[new_path] )
identifier[orig_name_to_temp] ={ identifier[seq] . identifier[description] : literal[string] + identifier[str] ( identifier[i] ) keyword[for] identifier[i] , identifier[seq] keyword[in] identifier[enumerate] ( identifier[self] )}
identifier[temp_name_to_orig] ={ identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[orig_name_to_temp] . identifier[items] ()}
identifier[temp_fasta] = identifier[self] . identifier[rename_sequences] ( identifier[orig_name_to_temp] )
keyword[if] identifier[seq_type] == literal[string] : identifier[t_option] = literal[string]
keyword[if] identifier[seq_type] == literal[string] : identifier[t_option] = literal[string]
identifier[result] = identifier[sh] . identifier[gblocks91] ( identifier[temp_fasta] . identifier[path] , identifier[t_option] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[_ok_code] =[ literal[int] , literal[int] ])
identifier[created_file] = identifier[temp_fasta] . identifier[path] + literal[string]
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[created_file] )
keyword[if] literal[string] keyword[in] identifier[result] . identifier[stdout] : keyword[raise] identifier[Exception] ( literal[string] )
identifier[temp_fasta] . identifier[rename_sequences] ( identifier[temp_name_to_orig] , identifier[final] )
keyword[return] identifier[final] | def gblocks(self, new_path=None, seq_type='nucl' or 'prot'):
"""Apply the gblocks filtering algorithm to the alignment.
See http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_documentation.html
        Need to rename all sequences, because it will complain about long names."""
# Temporary path #
if new_path is None:
final = self.__class__(new_temp_path()) # depends on [control=['if'], data=[]]
else:
final = self.__class__(new_path)
# Mapping every sequence name with a random name #
orig_name_to_temp = {seq.description: 'name' + str(i) for (i, seq) in enumerate(self)}
temp_name_to_orig = {v: k for (k, v) in orig_name_to_temp.items()}
# Rename every sequence with a random name #
temp_fasta = self.rename_sequences(orig_name_to_temp)
# Options #
if seq_type == 'nucl':
t_option = '-t=d' # depends on [control=['if'], data=[]]
if seq_type == 'prot':
t_option = '-t=p' # depends on [control=['if'], data=[]]
# Run it #
result = sh.gblocks91(temp_fasta.path, t_option, '-p=n', '-b4=3', '-b3=20', '-b5=a', _ok_code=[0, 1])
created_file = temp_fasta.path + '-gb'
assert os.path.exists(created_file)
# Check errors #
if 'Execution terminated' in result.stdout:
raise Exception('gblocks crashed again.') # depends on [control=['if'], data=[]]
# Back #
temp_fasta.rename_sequences(temp_name_to_orig, final)
# Return #
return final |
def get_bpm_tasks(self, **kwargs):
"""
List of (recently) active BPM tasks.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bpm_tasks(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:return: BpmTaskRestPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bpm_tasks_with_http_info(**kwargs)
else:
(data) = self.get_bpm_tasks_with_http_info(**kwargs)
return data | def function[get_bpm_tasks, parameter[self]]:
constant[
List of (recently) active BPM tasks.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bpm_tasks(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:return: BpmTaskRestPage
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[callback]]] begin[:]
return[call[name[self].get_bpm_tasks_with_http_info, parameter[]]] | keyword[def] identifier[get_bpm_tasks] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_bpm_tasks_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[get_bpm_tasks_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def get_bpm_tasks(self, **kwargs):
"""
List of (recently) active BPM tasks.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bpm_tasks(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:return: BpmTaskRestPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bpm_tasks_with_http_info(**kwargs) # depends on [control=['if'], data=[]]
else:
data = self.get_bpm_tasks_with_http_info(**kwargs)
return data |
def start_shell(local_ns: Dict=None, banner: str=''):
"""Create and immediately drop into a Python shell.
If IPython version 5 or greater is available it will be used instead
of the built-in python shell.
:param local_ns: An optional dict containing the global namespace of
the newly created shell.
:param banner: An optional banner to render when terminal starts.
"""
if IPYTHON_SHELL_AVAILABLE:
# Don't try to stop IPython from displaying its banner, since
# it's different in every major version
terminal = embed.InteractiveShellEmbed(user_ns={})
terminal.mainloop(local_ns=local_ns)
else:
code.interact(banner=banner, local=local_ns) | def function[start_shell, parameter[local_ns, banner]]:
constant[Create and immediately drop into a Python shell.
If IPython version 5 or greater is available it will be used instead
of the built-in python shell.
:param local_ns: An optional dict containing the global namespace of
the newly created shell.
:param banner: An optional banner to render when terminal starts.
]
if name[IPYTHON_SHELL_AVAILABLE] begin[:]
variable[terminal] assign[=] call[name[embed].InteractiveShellEmbed, parameter[]]
call[name[terminal].mainloop, parameter[]] | keyword[def] identifier[start_shell] ( identifier[local_ns] : identifier[Dict] = keyword[None] , identifier[banner] : identifier[str] = literal[string] ):
literal[string]
keyword[if] identifier[IPYTHON_SHELL_AVAILABLE] :
identifier[terminal] = identifier[embed] . identifier[InteractiveShellEmbed] ( identifier[user_ns] ={})
identifier[terminal] . identifier[mainloop] ( identifier[local_ns] = identifier[local_ns] )
keyword[else] :
identifier[code] . identifier[interact] ( identifier[banner] = identifier[banner] , identifier[local] = identifier[local_ns] ) | def start_shell(local_ns: Dict=None, banner: str=''):
"""Create and immediately drop into a Python shell.
If IPython version 5 or greater is available it will be used instead
of the built-in python shell.
:param local_ns: An optional dict containing the global namespace of
the newly created shell.
:param banner: An optional banner to render when terminal starts.
"""
if IPYTHON_SHELL_AVAILABLE:
# Don't try to stop IPython from displaying its banner, since
# it's different in every major version
terminal = embed.InteractiveShellEmbed(user_ns={})
terminal.mainloop(local_ns=local_ns) # depends on [control=['if'], data=[]]
else:
code.interact(banner=banner, local=local_ns) |
def from_dict(self, subj_files):
"""
Parameters
----------
subj_files: dict of str
file_path -> int/str
"""
for group_label in subj_files:
try:
group_files = subj_files[group_label]
self.items.extend([self._load_image(get_abspath(imgf)) for imgf in group_files])
self.labels.extend([group_label]*len(group_files))
except Exception as exc:
raise Exception('Error while reading files from '
'group {0}.'.format(group_label)) from exc | def function[from_dict, parameter[self, subj_files]]:
constant[
Parameters
----------
subj_files: dict of str
file_path -> int/str
]
for taget[name[group_label]] in starred[name[subj_files]] begin[:]
<ast.Try object at 0x7da1afe0cf70> | keyword[def] identifier[from_dict] ( identifier[self] , identifier[subj_files] ):
literal[string]
keyword[for] identifier[group_label] keyword[in] identifier[subj_files] :
keyword[try] :
identifier[group_files] = identifier[subj_files] [ identifier[group_label] ]
identifier[self] . identifier[items] . identifier[extend] ([ identifier[self] . identifier[_load_image] ( identifier[get_abspath] ( identifier[imgf] )) keyword[for] identifier[imgf] keyword[in] identifier[group_files] ])
identifier[self] . identifier[labels] . identifier[extend] ([ identifier[group_label] ]* identifier[len] ( identifier[group_files] ))
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] . identifier[format] ( identifier[group_label] )) keyword[from] identifier[exc] | def from_dict(self, subj_files):
"""
Parameters
----------
subj_files: dict of str
file_path -> int/str
"""
for group_label in subj_files:
try:
group_files = subj_files[group_label]
self.items.extend([self._load_image(get_abspath(imgf)) for imgf in group_files])
self.labels.extend([group_label] * len(group_files)) # depends on [control=['try'], data=[]]
except Exception as exc:
raise Exception('Error while reading files from group {0}.'.format(group_label)) from exc # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['group_label']] |
def _create_markup_plugin(language, model):
"""
Create a new MarkupPlugin class that represents the plugin type.
"""
form = type("{0}MarkupItemForm".format(language.capitalize()), (MarkupItemForm,), {
'default_language': language,
})
classname = "{0}MarkupPlugin".format(language.capitalize())
PluginClass = type(classname, (MarkupPluginBase,), {
'model': model,
'form': form,
})
return PluginClass | def function[_create_markup_plugin, parameter[language, model]]:
constant[
Create a new MarkupPlugin class that represents the plugin type.
]
variable[form] assign[=] call[name[type], parameter[call[constant[{0}MarkupItemForm].format, parameter[call[name[language].capitalize, parameter[]]]], tuple[[<ast.Name object at 0x7da1b11741f0>]], dictionary[[<ast.Constant object at 0x7da1b1175cf0>], [<ast.Name object at 0x7da1b11751b0>]]]]
variable[classname] assign[=] call[constant[{0}MarkupPlugin].format, parameter[call[name[language].capitalize, parameter[]]]]
variable[PluginClass] assign[=] call[name[type], parameter[name[classname], tuple[[<ast.Name object at 0x7da1b1174490>]], dictionary[[<ast.Constant object at 0x7da1b1175300>, <ast.Constant object at 0x7da1b1176a40>], [<ast.Name object at 0x7da1b1175960>, <ast.Name object at 0x7da1b11747f0>]]]]
return[name[PluginClass]] | keyword[def] identifier[_create_markup_plugin] ( identifier[language] , identifier[model] ):
literal[string]
identifier[form] = identifier[type] ( literal[string] . identifier[format] ( identifier[language] . identifier[capitalize] ()),( identifier[MarkupItemForm] ,),{
literal[string] : identifier[language] ,
})
identifier[classname] = literal[string] . identifier[format] ( identifier[language] . identifier[capitalize] ())
identifier[PluginClass] = identifier[type] ( identifier[classname] ,( identifier[MarkupPluginBase] ,),{
literal[string] : identifier[model] ,
literal[string] : identifier[form] ,
})
keyword[return] identifier[PluginClass] | def _create_markup_plugin(language, model):
"""
Create a new MarkupPlugin class that represents the plugin type.
"""
form = type('{0}MarkupItemForm'.format(language.capitalize()), (MarkupItemForm,), {'default_language': language})
classname = '{0}MarkupPlugin'.format(language.capitalize())
PluginClass = type(classname, (MarkupPluginBase,), {'model': model, 'form': form})
return PluginClass |
def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name
"""Commands for experiments."""
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['experiment'] = experiment | def function[experiment, parameter[ctx, project, experiment]]:
constant[Commands for experiments.]
name[ctx].obj assign[=] <ast.BoolOp object at 0x7da1affc30a0>
call[name[ctx].obj][constant[project]] assign[=] name[project]
call[name[ctx].obj][constant[experiment]] assign[=] name[experiment] | keyword[def] identifier[experiment] ( identifier[ctx] , identifier[project] , identifier[experiment] ):
literal[string]
identifier[ctx] . identifier[obj] = identifier[ctx] . identifier[obj] keyword[or] {}
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[project]
identifier[ctx] . identifier[obj] [ literal[string] ]= identifier[experiment] | def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name
'Commands for experiments.'
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['experiment'] = experiment |
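# Hedged sketch of the click wiring this group callback assumes: values stored on
# ctx.obj by the group are visible to its subcommands via pass_context. The group
# and subcommand names here are made up for illustration.
import click

@click.group()
@click.option('--project')
@click.option('--experiment')
@click.pass_context
def experiment_group(ctx, project, experiment):
    ctx.obj = ctx.obj or {}
    ctx.obj['project'] = project
    ctx.obj['experiment'] = experiment

@experiment_group.command()
@click.pass_context
def show(ctx):
    click.echo(ctx.obj['project'])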
def get_cmdclass():
""" DEPRICATE """
try:
from Cython.Distutils import build_ext
cmdclass = {'build_ext': build_ext}
return cmdclass
except Exception as ex:
print(ex)
print('WARNING: Cython is not installed. This is only a problem if you are building C extensions')
return {} | def function[get_cmdclass, parameter[]]:
constant[ DEPRECATE ]
<ast.Try object at 0x7da1b24bdb40> | keyword[def] identifier[get_cmdclass] ():
literal[string]
keyword[try] :
keyword[from] identifier[Cython] . identifier[Distutils] keyword[import] identifier[build_ext]
identifier[cmdclass] ={ literal[string] : identifier[build_ext] }
keyword[return] identifier[cmdclass]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[print] ( identifier[ex] )
identifier[print] ( literal[string] )
keyword[return] {} | def get_cmdclass():
""" DEPRICATE """
try:
from Cython.Distutils import build_ext
cmdclass = {'build_ext': build_ext}
return cmdclass # depends on [control=['try'], data=[]]
except Exception as ex:
print(ex)
print('WARNING: Cython is not installed. This is only a problem if you are building C extensions')
return {} # depends on [control=['except'], data=['ex']] |
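# Hedged usage sketch: a setup.py would pass the result straight to setup(), so
# the build degrades to a plain install when Cython is absent. The package name
# and version are made up.
from setuptools import setup

setup(
    name='example-pkg',
    version='0.0.1',
    cmdclass=get_cmdclass(),  # {'build_ext': build_ext} if Cython is available, else {}
)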
def get_index_line(self,lnum):
""" Take the 1-indexed line number and return its index information"""
if lnum < 1:
sys.stderr.write("ERROR: line number should be greater than zero\n")
sys.exit()
elif lnum > len(self._lines):
sys.stderr.write("ERROR: too far this line nuber is not in index\n")
sys.exit()
return self._lines[lnum-1] | def function[get_index_line, parameter[self, lnum]]:
constant[ Take the 1-indexed line number and return its index information]
if compare[name[lnum] less[<] constant[1]] begin[:]
call[name[sys].stderr.write, parameter[constant[ERROR: line number should be greater than zero
]]]
call[name[sys].exit, parameter[]]
return[call[name[self]._lines][binary_operation[name[lnum] - constant[1]]]] | keyword[def] identifier[get_index_line] ( identifier[self] , identifier[lnum] ):
literal[string]
keyword[if] identifier[lnum] < literal[int] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] )
identifier[sys] . identifier[exit] ()
keyword[elif] identifier[lnum] > identifier[len] ( identifier[self] . identifier[_lines] ):
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] )
identifier[sys] . identifier[exit] ()
keyword[return] identifier[self] . identifier[_lines] [ identifier[lnum] - literal[int] ] | def get_index_line(self, lnum):
""" Take the 1-indexed line number and return its index information"""
if lnum < 1:
sys.stderr.write('ERROR: line number should be greater than zero\n')
sys.exit() # depends on [control=['if'], data=[]]
elif lnum > len(self._lines):
sys.stderr.write('ERROR: too far, this line number is not in the index\n')
sys.exit() # depends on [control=['if'], data=[]]
return self._lines[lnum - 1] |
def _set_packet_timestamp(self, v, load=False):
"""
Setter method for packet_timestamp, mapped from YANG variable /interface/port_channel/system/packet_timestamp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_packet_timestamp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_packet_timestamp() directly.
YANG Description: Packet timestamp setting
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=packet_timestamp.packet_timestamp, is_container='container', presence=False, yang_name="packet-timestamp", rest_name="packet-timestamp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Packet timestamp setting', u'callpoint': u'PacketTimestampPoIntf'}}, namespace='urn:brocade.com:mgmt:brocade-packet-timestamp', defining_module='brocade-packet-timestamp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """packet_timestamp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=packet_timestamp.packet_timestamp, is_container='container', presence=False, yang_name="packet-timestamp", rest_name="packet-timestamp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Packet timestamp setting', u'callpoint': u'PacketTimestampPoIntf'}}, namespace='urn:brocade.com:mgmt:brocade-packet-timestamp', defining_module='brocade-packet-timestamp', yang_type='container', is_config=True)""",
})
self.__packet_timestamp = t
if hasattr(self, '_set'):
self._set() | def function[_set_packet_timestamp, parameter[self, v, load]]:
constant[
Setter method for packet_timestamp, mapped from YANG variable /interface/port_channel/system/packet_timestamp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_packet_timestamp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_packet_timestamp() directly.
YANG Description: Packet timestamp setting
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18bccb340>
name[self].__packet_timestamp assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_packet_timestamp] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[packet_timestamp] . identifier[packet_timestamp] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__packet_timestamp] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_packet_timestamp(self, v, load=False):
"""
Setter method for packet_timestamp, mapped from YANG variable /interface/port_channel/system/packet_timestamp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_packet_timestamp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_packet_timestamp() directly.
YANG Description: Packet timestamp setting
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=packet_timestamp.packet_timestamp, is_container='container', presence=False, yang_name='packet-timestamp', rest_name='packet-timestamp', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Packet timestamp setting', u'callpoint': u'PacketTimestampPoIntf'}}, namespace='urn:brocade.com:mgmt:brocade-packet-timestamp', defining_module='brocade-packet-timestamp', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'packet_timestamp must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=packet_timestamp.packet_timestamp, is_container=\'container\', presence=False, yang_name="packet-timestamp", rest_name="packet-timestamp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Packet timestamp setting\', u\'callpoint\': u\'PacketTimestampPoIntf\'}}, namespace=\'urn:brocade.com:mgmt:brocade-packet-timestamp\', defining_module=\'brocade-packet-timestamp\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__packet_timestamp = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'):
r""" Reads text from a file. Automatically returns utf8.
Args:
fpath (str): file path
aslines (bool): if True returns list of lines
verbose (bool): verbosity flag
Returns:
str: text from fpath (this is unicode)
Ignore:
x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n'''
ut.writeto('foo.txt', x)
y = ut.readfrom('foo.txt')
y.encode('utf8') == x
"""
if n is None:
n = __READ_TAIL_N__
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n))
try:
if not util_path.checkpath(fpath, verbose=verbose, n=n):
raise IOError('[io] * FILE DOES NOT EXIST!')
#with open(fpath, 'r') as file_:
with open(fpath, 'rb') as file_:
if aslines:
#text = file_.readlines()
if six.PY2:
# python2 writes in bytes, so read as bytes then convert to
# utf8
text = [line.decode('utf8', errors=errors)
for line in file_.readlines()]
else:
text = [line.decode('utf8', errors=errors)
for line in file_.readlines()]
#text = file_.readlines()
else:
# text = file_.read()
if six.PY2:
text = file_.read().decode('utf8', errors=errors)
else:
#text = file_.read()
text = file_.read().decode('utf8', errors=errors)
return text
except IOError as ex:
from utool import util_dbg
if verbose or strict:
util_dbg.printex(ex, ' * Error reading fpath=%r' %
util_path.tail(fpath, n=n), '[io]')
if strict:
raise | def function[read_from, parameter[fpath, verbose, aslines, strict, n, errors]]:
constant[ Reads text from a file. Automatically returns utf8.
Args:
fpath (str): file path
aslines (bool): if True returns list of lines
verbose (bool): verbosity flag
Returns:
str: text from fpath (this is unicode)
Ignore:
x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n'''
ut.writeto('foo.txt', x)
y = ut.readfrom('foo.txt')
y.encode('utf8') == x
]
if compare[name[n] is constant[None]] begin[:]
variable[n] assign[=] name[__READ_TAIL_N__]
variable[verbose] assign[=] call[name[_rectify_verb_read], parameter[name[verbose]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[util_io] * Reading text file: %r ] <ast.Mod object at 0x7da2590d6920> call[name[util_path].tail, parameter[name[fpath]]]]]]
<ast.Try object at 0x7da1b24ae6e0> | keyword[def] identifier[read_from] ( identifier[fpath] , identifier[verbose] = keyword[None] , identifier[aslines] = keyword[False] , identifier[strict] = keyword[True] , identifier[n] = keyword[None] , identifier[errors] = literal[string] ):
literal[string]
keyword[if] identifier[n] keyword[is] keyword[None] :
identifier[n] = identifier[__READ_TAIL_N__]
identifier[verbose] = identifier[_rectify_verb_read] ( identifier[verbose] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] % identifier[util_path] . identifier[tail] ( identifier[fpath] , identifier[n] = identifier[n] ))
keyword[try] :
keyword[if] keyword[not] identifier[util_path] . identifier[checkpath] ( identifier[fpath] , identifier[verbose] = identifier[verbose] , identifier[n] = identifier[n] ):
keyword[raise] identifier[IOError] ( literal[string] )
keyword[with] identifier[open] ( identifier[fpath] , literal[string] ) keyword[as] identifier[file_] :
keyword[if] identifier[aslines] :
keyword[if] identifier[six] . identifier[PY2] :
identifier[text] =[ identifier[line] . identifier[decode] ( literal[string] , identifier[errors] = identifier[errors] )
keyword[for] identifier[line] keyword[in] identifier[file_] . identifier[readlines] ()]
keyword[else] :
identifier[text] =[ identifier[line] . identifier[decode] ( literal[string] , identifier[errors] = identifier[errors] )
keyword[for] identifier[line] keyword[in] identifier[file_] . identifier[readlines] ()]
keyword[else] :
keyword[if] identifier[six] . identifier[PY2] :
identifier[text] = identifier[file_] . identifier[read] (). identifier[decode] ( literal[string] , identifier[errors] = identifier[errors] )
keyword[else] :
identifier[text] = identifier[file_] . identifier[read] (). identifier[decode] ( literal[string] , identifier[errors] = identifier[errors] )
keyword[return] identifier[text]
keyword[except] identifier[IOError] keyword[as] identifier[ex] :
keyword[from] identifier[utool] keyword[import] identifier[util_dbg]
keyword[if] identifier[verbose] keyword[or] identifier[strict] :
identifier[util_dbg] . identifier[printex] ( identifier[ex] , literal[string] %
identifier[util_path] . identifier[tail] ( identifier[fpath] , identifier[n] = identifier[n] ), literal[string] )
keyword[if] identifier[strict] :
keyword[raise] | def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'):
""" Reads text from a file. Automatically returns utf8.
Args:
fpath (str): file path
aslines (bool): if True returns list of lines
verbose (bool): verbosity flag
Returns:
str: text from fpath (this is unicode)
Ignore:
x = b'''/whaleshark_003_fors\\xc3\\xb8g.wmv" />\\r\\n'''
ut.writeto('foo.txt', x)
y = ut.readfrom('foo.txt')
y.encode('utf8') == x
"""
if n is None:
n = __READ_TAIL_N__ # depends on [control=['if'], data=['n']]
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n)) # depends on [control=['if'], data=[]]
try:
if not util_path.checkpath(fpath, verbose=verbose, n=n):
raise IOError('[io] * FILE DOES NOT EXIST!') # depends on [control=['if'], data=[]]
#with open(fpath, 'r') as file_:
with open(fpath, 'rb') as file_:
if aslines:
#text = file_.readlines()
if six.PY2:
# python2 writes in bytes, so read as bytes then convert to
# utf8
text = [line.decode('utf8', errors=errors) for line in file_.readlines()] # depends on [control=['if'], data=[]]
else:
text = [line.decode('utf8', errors=errors) for line in file_.readlines()] # depends on [control=['if'], data=[]]
#text = file_.readlines()
# text = file_.read()
elif six.PY2:
text = file_.read().decode('utf8', errors=errors) # depends on [control=['if'], data=[]]
else:
#text = file_.read()
text = file_.read().decode('utf8', errors=errors) # depends on [control=['with'], data=['file_']]
return text # depends on [control=['try'], data=[]]
except IOError as ex:
from utool import util_dbg
if verbose or strict:
util_dbg.printex(ex, ' * Error reading fpath=%r' % util_path.tail(fpath, n=n), '[io]') # depends on [control=['if'], data=[]]
if strict:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['ex']] |
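# Minimal sketch of the decode behaviour read_from depends on: decoding with
# errors='replace' never raises; malformed bytes become U+FFFD instead.
raw = b'/whaleshark_003_fors\xc3\xb8g.wmv'
print(raw.decode('utf8', errors='replace'))          # valid utf8, the 'ø' survives
print(b'\xff\xfe'.decode('utf8', errors='replace'))  # '\ufffd\ufffd' rather than UnicodeDecodeError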
def state(self, states=None):
"""Filter by state.
:param states: States to filter by (case-insensitive).
:type states: ``list``
:return: This object, with ``self.nodes`` filtered in place to enable chaining.
"""
if states is None or not states:
return self
nodes = []
for node in self.nodes:
if any(state.lower() == node.state.lower() for state in states):
nodes.append(node)
self.nodes = nodes
return self | def function[state, parameter[self, states]]:
constant[Filter by state.
:param states: States to filter by (case-insensitive).
:type states: ``list``
:return: This object, with ``self.nodes`` filtered in place to enable chaining.
]
if <ast.BoolOp object at 0x7da1b1395c60> begin[:]
return[name[self]]
variable[nodes] assign[=] list[[]]
for taget[name[node]] in starred[name[self].nodes] begin[:]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1395bd0>]] begin[:]
call[name[nodes].append, parameter[name[node]]]
name[self].nodes assign[=] name[nodes]
return[name[self]] | keyword[def] identifier[state] ( identifier[self] , identifier[states] = keyword[None] ):
literal[string]
keyword[if] identifier[states] keyword[is] keyword[None] keyword[or] keyword[not] identifier[states] :
keyword[return] identifier[self]
identifier[nodes] =[]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] :
keyword[if] identifier[any] ( identifier[state] . identifier[lower] ()== identifier[node] . identifier[state] . identifier[lower] () keyword[for] identifier[state] keyword[in] identifier[states] ):
identifier[nodes] . identifier[append] ( identifier[node] )
identifier[self] . identifier[nodes] = identifier[nodes]
keyword[return] identifier[self] | def state(self, states=None):
"""Filter by state.
:param states: States to filter by (case-insensitive).
:type states: ``list``
:return: This object, with ``self.nodes`` filtered in place to enable chaining.
"""
if states is None or not states:
return self # depends on [control=['if'], data=[]]
nodes = []
for node in self.nodes:
if any((state.lower() == node.state.lower() for state in states)):
nodes.append(node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
self.nodes = nodes
return self |
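# Usage sketch with assumed Node objects: the filter mutates self.nodes and
# returns self, so state() can be chained with other filters of the same shape.
# Node and NodePool are hypothetical stand-ins for the real classes.
class Node:
    def __init__(self, state):
        self.state = state

class NodePool:
    def __init__(self, nodes):
        self.nodes = nodes

NodePool.state = state  # attach the filter defined above as a method

pool = NodePool([Node('RUNNING'), Node('stopped'), Node('running')])
print(len(pool.state(['running']).nodes))  # 2 -- matching is case-insensitive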
def run_bafRegress(filenames, out_prefix, extract_filename, freq_filename,
options):
"""Runs the bafRegress function.
:param filenames: the set of all sample files.
:param out_prefix: the output prefix.
:param extract_filename: the name of the markers to extract.
:param freq_filename: the name of the file containing the frequency.
:param options: the other options.
:type filenames: set
:type out_prefix: str
:type extract_filename: str
:type freq_filename: str
:type options: argparse.Namespace
"""
# The command
command = [
"bafRegress.py",
"estimate",
"--freqfile", freq_filename,
"--freqcol", "2,5",
"--extract", extract_filename,
"--colsample", options.colsample,
"--colmarker", options.colmarker,
"--colbaf", options.colbaf,
"--colab1", options.colab1,
"--colab2", options.colab2,
]
command.extend(filenames)
output = None
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT,
shell=False)
except subprocess.CalledProcessError as exc:
raise ProgramError("bafRegress.py: couldn't run "
"bafRegress.py\n{}".format(exc.output))
# Saving the output
try:
with open(out_prefix + ".bafRegress", "w") as o_file:
o_file.write(output)
except IOError:
raise ProgramError("{}: cannot write file".format(
out_prefix + ".bafRegress",
)) | def function[run_bafRegress, parameter[filenames, out_prefix, extract_filename, freq_filename, options]]:
constant[Runs the bafRegress function.
:param filenames: the set of all sample files.
:param out_prefix: the output prefix.
:param extract_filename: the name of the markers to extract.
:param freq_filename: the name of the file containing the frequency.
:param options: the other options.
:type filenames: set
:type out_prefix: str
:type extract_filename: str
:type freq_filename: str
:type options: argparse.Namespace
]
variable[command] assign[=] list[[<ast.Constant object at 0x7da1b095fdc0>, <ast.Constant object at 0x7da1b095efb0>, <ast.Constant object at 0x7da1b095eaa0>, <ast.Name object at 0x7da1b095cf10>, <ast.Constant object at 0x7da1b095f790>, <ast.Constant object at 0x7da1b095cf70>, <ast.Constant object at 0x7da1b095eda0>, <ast.Name object at 0x7da1b095cf40>, <ast.Constant object at 0x7da1b095d630>, <ast.Attribute object at 0x7da1b095df30>, <ast.Constant object at 0x7da1b095f970>, <ast.Attribute object at 0x7da1b095dba0>, <ast.Constant object at 0x7da1b095f640>, <ast.Attribute object at 0x7da1b095f010>, <ast.Constant object at 0x7da1b095ef80>, <ast.Attribute object at 0x7da1b095dbd0>, <ast.Constant object at 0x7da1b095d2d0>, <ast.Attribute object at 0x7da1b095e230>]]
call[name[command].extend, parameter[name[filenames]]]
variable[output] assign[=] constant[None]
<ast.Try object at 0x7da1b095f610>
<ast.Try object at 0x7da1b095ebf0> | keyword[def] identifier[run_bafRegress] ( identifier[filenames] , identifier[out_prefix] , identifier[extract_filename] , identifier[freq_filename] ,
identifier[options] ):
literal[string]
identifier[command] =[
literal[string] ,
literal[string] ,
literal[string] , identifier[freq_filename] ,
literal[string] , literal[string] ,
literal[string] , identifier[extract_filename] ,
literal[string] , identifier[options] . identifier[colsample] ,
literal[string] , identifier[options] . identifier[colmarker] ,
literal[string] , identifier[options] . identifier[colbaf] ,
literal[string] , identifier[options] . identifier[colab1] ,
literal[string] , identifier[options] . identifier[colab2] ,
]
identifier[command] . identifier[extend] ( identifier[filenames] )
identifier[output] = keyword[None]
keyword[try] :
identifier[output] = identifier[subprocess] . identifier[check_output] ( identifier[command] , identifier[stderr] = identifier[subprocess] . identifier[STDOUT] ,
identifier[shell] = keyword[False] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[exc] :
keyword[raise] identifier[ProgramError] ( literal[string]
literal[string] . identifier[format] ( identifier[exc] . identifier[output] ))
keyword[try] :
keyword[with] identifier[open] ( identifier[out_prefix] + literal[string] , literal[string] ) keyword[as] identifier[o_file] :
identifier[o_file] . identifier[write] ( identifier[output] )
keyword[except] identifier[IOError] :
keyword[raise] identifier[ProgramError] ( literal[string] . identifier[format] (
identifier[out_prefix] + literal[string] ,
)) | def run_bafRegress(filenames, out_prefix, extract_filename, freq_filename, options):
"""Runs the bafRegress function.
:param filenames: the set of all sample files.
:param out_prefix: the output prefix.
:param extract_filename: the name of the markers to extract.
:param freq_filename: the name of the file containing the frequency.
:param options: the other options.
:type filenames: set
:type out_prefix: str
:type extract_filename: str
:type freq_filename: str
:type options: argparse.Namespace
"""
# The command
command = ['bafRegress.py', 'estimate', '--freqfile', freq_filename, '--freqcol', '2,5', '--extract', extract_filename, '--colsample', options.colsample, '--colmarker', options.colmarker, '--colbaf', options.colbaf, '--colab1', options.colab1, '--colab2', options.colab2]
command.extend(filenames)
output = None
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=False) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as exc:
raise ProgramError("bafRegress.py: couldn't run bafRegress.py\n{}".format(exc.output)) # depends on [control=['except'], data=['exc']]
# Saving the output
try:
with open(out_prefix + '.bafRegress', 'w') as o_file:
o_file.write(output) # depends on [control=['with'], data=['o_file']] # depends on [control=['try'], data=[]]
except IOError:
raise ProgramError('{}: cannot write file'.format(out_prefix + '.bafRegress')) # depends on [control=['except'], data=[]] |
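# Minimal sketch of the subprocess pattern used above: with stderr redirected to
# STDOUT, a failing command raises CalledProcessError whose .output carries the
# combined stdout/stderr text that run_bafRegress re-raises as a ProgramError.
import subprocess

try:
    subprocess.check_output(['ls', 'no-such-dir'],
                            stderr=subprocess.STDOUT, shell=False)
except subprocess.CalledProcessError as exc:
    print('command failed with:', exc.output)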
def flatten_tree_to_ident_hashes(item_or_tree,
lucent_id=TRANSLUCENT_BINDER_ID):
"""Flatten a tree to id and version values (ident_hash)."""
if 'contents' in item_or_tree:
tree = item_or_tree
if tree['id'] != lucent_id:
yield tree['id']
for i in tree['contents']:
# yield from flatten_tree_to_ident_hashes(i, lucent_id)
for x in flatten_tree_to_ident_hashes(i, lucent_id):
yield x
else:
item = item_or_tree
yield item['id'] | def function[flatten_tree_to_ident_hashes, parameter[item_or_tree, lucent_id]]:
constant[Flatten a tree to id and version values (ident_hash).]
if compare[constant[contents] in name[item_or_tree]] begin[:]
variable[tree] assign[=] name[item_or_tree]
if compare[call[name[tree]][constant[id]] not_equal[!=] name[lucent_id]] begin[:]
<ast.Yield object at 0x7da1b197e470>
for taget[name[i]] in starred[call[name[tree]][constant[contents]]] begin[:]
for taget[name[x]] in starred[call[name[flatten_tree_to_ident_hashes], parameter[name[i], name[lucent_id]]]] begin[:]
<ast.Yield object at 0x7da1b197f820> | keyword[def] identifier[flatten_tree_to_ident_hashes] ( identifier[item_or_tree] ,
identifier[lucent_id] = identifier[TRANSLUCENT_BINDER_ID] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[item_or_tree] :
identifier[tree] = identifier[item_or_tree]
keyword[if] identifier[tree] [ literal[string] ]!= identifier[lucent_id] :
keyword[yield] identifier[tree] [ literal[string] ]
keyword[for] identifier[i] keyword[in] identifier[tree] [ literal[string] ]:
keyword[for] identifier[x] keyword[in] identifier[flatten_tree_to_ident_hashes] ( identifier[i] , identifier[lucent_id] ):
keyword[yield] identifier[x]
keyword[else] :
identifier[item] = identifier[item_or_tree]
keyword[yield] identifier[item] [ literal[string] ] | def flatten_tree_to_ident_hashes(item_or_tree, lucent_id=TRANSLUCENT_BINDER_ID):
"""Flatten a tree to id and version values (ident_hash)."""
if 'contents' in item_or_tree:
tree = item_or_tree
if tree['id'] != lucent_id:
yield tree['id'] # depends on [control=['if'], data=[]]
for i in tree['contents']:
# yield from flatten_tree_to_ident_hashes(i, lucent_id)
for x in flatten_tree_to_ident_hashes(i, lucent_id):
yield x # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['item_or_tree']]
else:
item = item_or_tree
yield item['id'] |
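# Worked example on a hypothetical tree: nodes with 'contents' are containers; a
# container whose id equals lucent_id is not yielded but is still descended into.
tree = {
    'id': 'subcol',  # pretend TRANSLUCENT_BINDER_ID == 'subcol'
    'contents': [
        {'id': 'm1@1'},
        {'id': 'b1@2', 'contents': [{'id': 'm2@1'}]},
    ],
}
print(list(flatten_tree_to_ident_hashes(tree, lucent_id='subcol')))
# ['m1@1', 'b1@2', 'm2@1'] -- the translucent root id is omitted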
async def service(self, limit=None, quota: Optional[Quota] = None) -> int:
"""
Service `limit` number of received messages in this stack.
:param limit: the maximum number of messages to be processed. If None,
processes all of the messages in rxMsgs.
:param quota: optional quota forwarded to the underlying stack servicing call.
:return: the number of messages processed.
"""
if self.listener:
await self._serviceStack(self.age, quota)
else:
logger.info("{} is stopped".format(self))
r = len(self.rxMsgs)
if r > 0:
pracLimit = limit if limit else sys.maxsize
return self.processReceived(pracLimit)
return 0 | <ast.AsyncFunctionDef object at 0x7da1b16b17b0> | keyword[async] keyword[def] identifier[service] ( identifier[self] , identifier[limit] = keyword[None] , identifier[quota] : identifier[Optional] [ identifier[Quota] ]= keyword[None] )-> identifier[int] :
literal[string]
keyword[if] identifier[self] . identifier[listener] :
keyword[await] identifier[self] . identifier[_serviceStack] ( identifier[self] . identifier[age] , identifier[quota] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] ))
identifier[r] = identifier[len] ( identifier[self] . identifier[rxMsgs] )
keyword[if] identifier[r] > literal[int] :
identifier[pracLimit] = identifier[limit] keyword[if] identifier[limit] keyword[else] identifier[sys] . identifier[maxsize]
keyword[return] identifier[self] . identifier[processReceived] ( identifier[pracLimit] )
keyword[return] literal[int] | async def service(self, limit=None, quota: Optional[Quota]=None) -> int:
"""
Service `limit` number of received messages in this stack.
:param limit: the maximum number of messages to be processed. If None,
processes all of the messages in rxMsgs.
:param quota: optional quota forwarded to the underlying stack servicing call.
:return: the number of messages processed.
"""
if self.listener:
await self._serviceStack(self.age, quota) # depends on [control=['if'], data=[]]
else:
logger.info('{} is stopped'.format(self))
r = len(self.rxMsgs)
if r > 0:
pracLimit = limit if limit else sys.maxsize
return self.processReceived(pracLimit) # depends on [control=['if'], data=[]]
return 0 |
def message(self):
'the standard message which can be transferred'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
} | def function[message, parameter[self]]:
constant[the standard message which can be transferred]
return[dictionary[[<ast.Constant object at 0x7da1b20466b0>, <ast.Constant object at 0x7da1b20454b0>, <ast.Constant object at 0x7da1b2047ca0>, <ast.Constant object at 0x7da1b20442b0>, <ast.Constant object at 0x7da1b2047160>, <ast.Constant object at 0x7da1b2045450>, <ast.Constant object at 0x7da1b2044700>, <ast.Constant object at 0x7da1b2044c10>, <ast.Constant object at 0x7da1b20461d0>, <ast.Constant object at 0x7da1b2045600>, <ast.Constant object at 0x7da1b2045e70>, <ast.Constant object at 0x7da1b2047c70>, <ast.Constant object at 0x7da1b2044250>, <ast.Constant object at 0x7da1b2047520>, <ast.Constant object at 0x7da1b2047460>, <ast.Constant object at 0x7da1b2045210>, <ast.Constant object at 0x7da1b2044dc0>, <ast.Constant object at 0x7da1b20447f0>, <ast.Constant object at 0x7da1b20451b0>, <ast.Constant object at 0x7da1b2046590>, <ast.Constant object at 0x7da1b2046890>, <ast.Constant object at 0x7da1b2044850>, <ast.Constant object at 0x7da1b20475b0>, <ast.Constant object at 0x7da1b2044880>, <ast.Constant object at 0x7da1b2044fa0>, <ast.Constant object at 0x7da1b2045570>, <ast.Constant object at 0x7da1b20457b0>, <ast.Constant object at 0x7da1b2044790>], [<ast.Constant object at 0x7da1b20478e0>, <ast.Attribute object at 0x7da1b2047640>, <ast.Attribute object at 0x7da1b2047d90>, <ast.Attribute object at 0x7da1b20455d0>, <ast.Attribute object at 0x7da1b2044820>, <ast.Attribute object at 0x7da1b2045000>, <ast.Attribute object at 0x7da1b2044100>, <ast.Attribute object at 0x7da1b2046b60>, <ast.Call object at 0x7da1b2047b50>, <ast.Attribute object at 0x7da1b2044eb0>, <ast.Attribute object at 0x7da1b2044be0>, <ast.Attribute object at 0x7da1b20451e0>, <ast.Attribute object at 0x7da1b20465c0>, <ast.Attribute object at 0x7da1b2045ae0>, <ast.Attribute object at 0x7da1b2044400>, <ast.Call object at 0x7da1b2046170>, <ast.Attribute object at 0x7da1b2046d10>, <ast.Attribute object at 0x7da1b2044d00>, <ast.Attribute object at 0x7da1b2045180>, <ast.Attribute object at 0x7da1b20479d0>, <ast.Attribute object at 0x7da1b20474c0>, <ast.IfExp object at 0x7da1b2047dc0>, <ast.Attribute object at 0x7da1b2046200>, <ast.Attribute object at 0x7da1b20456c0>, <ast.Attribute object at 0x7da1b20444f0>, <ast.Attribute object at 0x7da1b2047760>, <ast.Attribute object at 0x7da1b2047790>, <ast.Attribute object at 0x7da18bc73bb0>]]] | keyword[def] identifier[message] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] :
literal[string] ,
literal[string] :
identifier[self] . identifier[frequence] ,
literal[string] :
identifier[self] . identifier[account_cookie] ,
literal[string] :
identifier[self] . identifier[portfolio_cookie] ,
literal[string] :
identifier[self] . identifier[user_cookie] ,
literal[string] :
identifier[self] . identifier[broker] ,
literal[string] :
identifier[self] . identifier[market_type] ,
literal[string] :
identifier[self] . identifier[strategy_name] ,
literal[string] :
identifier[str] ( identifier[self] . identifier[_currenttime] ),
literal[string] :
identifier[self] . identifier[allow_sellopen] ,
literal[string] :
identifier[self] . identifier[allow_margin] ,
literal[string] :
identifier[self] . identifier[allow_t0] ,
literal[string] :
identifier[self] . identifier[margin_level] ,
literal[string] :
identifier[self] . identifier[init_assets] ,
literal[string] :
identifier[self] . identifier[init_cash] ,
literal[string] :
identifier[self] . identifier[init_hold] . identifier[to_dict] (),
literal[string] :
identifier[self] . identifier[commission_coeff] ,
literal[string] :
identifier[self] . identifier[tax_coeff] ,
literal[string] :
identifier[self] . identifier[cash] ,
literal[string] :
identifier[self] . identifier[history] ,
literal[string] :
identifier[self] . identifier[time_index_max] ,
literal[string] :
identifier[str] ( identifier[datetime] . identifier[datetime] . identifier[now] ())
keyword[if] identifier[self] . identifier[running_time] keyword[is] keyword[None] keyword[else] identifier[str] ( identifier[self] . identifier[running_time] ),
literal[string] :
identifier[self] . identifier[quantaxis_version] ,
literal[string] :
identifier[self] . identifier[running_environment] ,
literal[string] :
identifier[self] . identifier[start_date] ,
literal[string] :
identifier[self] . identifier[end_date] ,
literal[string] :
identifier[self] . identifier[frozen] ,
literal[string] :
identifier[self] . identifier[finishedOrderid]
} | def message(self):
"""the standard message which can be transfer"""
return {'source': 'account', 'frequence': self.frequence, 'account_cookie': self.account_cookie, 'portfolio_cookie': self.portfolio_cookie, 'user_cookie': self.user_cookie, 'broker': self.broker, 'market_type': self.market_type, 'strategy_name': self.strategy_name, 'current_time': str(self._currenttime), 'allow_sellopen': self.allow_sellopen, 'allow_margin': self.allow_margin, 'allow_t0': self.allow_t0, 'margin_level': self.margin_level, 'init_assets': self.init_assets, 'init_cash': self.init_cash, 'init_hold': self.init_hold.to_dict(), 'commission_coeff': self.commission_coeff, 'tax_coeff': self.tax_coeff, 'cash': self.cash, 'history': self.history, 'trade_index': self.time_index_max, 'running_time': str(datetime.datetime.now()) if self.running_time is None else str(self.running_time), 'quantaxis_version': self.quantaxis_version, 'running_environment': self.running_environment, 'start_date': self.start_date, 'end_date': self.end_date, 'frozen': self.frozen, 'finished_id': self.finishedOrderid} |
def amint_to_char(am, hij=False, use_L=False):
'''Convert an angular momentum integer to a character
The input is a list (to handle sp, spd, ... orbitals). The return
value is a string
For example, converts [0] to 's' and [0,1,2] to 'spd'
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j)
If use_L is True, sp shells ([0,1]) will return l instead
'''
if use_L and am == [0, 1]:
return 'l'
if hij:
amchar_map = _amchar_map_hij
else:
amchar_map = _amchar_map_hik
amchar = []
for a in am:
if a < 0:
raise IndexError('Angular momentum must be a non-negative integer (not {})'.format(a))
if a >= len(amchar_map):
raise IndexError('Angular momentum {} out of range. Must be less than {}'.format(a, len(amchar_map)))
amchar.append(amchar_map[a])
return ''.join(amchar) | def function[amint_to_char, parameter[am, hij, use_L]]:
constant[Convert an angular momentum integer to a character
The input is a list (to handle sp, spd, ... orbitals). The return
value is a string
For example, converts [0] to 's' and [0,1,2] to 'spd'
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j)
If use_L is True, sp shells ([0,1]) will return l instead
]
if <ast.BoolOp object at 0x7da1b1e9b8e0> begin[:]
return[constant[l]]
if name[hij] begin[:]
variable[amchar_map] assign[=] name[_amchar_map_hij]
variable[amchar] assign[=] list[[]]
for taget[name[a]] in starred[name[am]] begin[:]
if compare[name[a] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1e99270>
if compare[name[a] greater_or_equal[>=] call[name[len], parameter[name[amchar_map]]]] begin[:]
<ast.Raise object at 0x7da1b1e98ee0>
call[name[amchar].append, parameter[call[name[amchar_map]][name[a]]]]
return[call[constant[].join, parameter[name[amchar]]]] | keyword[def] identifier[amint_to_char] ( identifier[am] , identifier[hij] = keyword[False] , identifier[use_L] = keyword[False] ):
literal[string]
keyword[if] identifier[use_L] keyword[and] identifier[am] ==[ literal[int] , literal[int] ]:
keyword[return] literal[string]
keyword[if] identifier[hij] :
identifier[amchar_map] = identifier[_amchar_map_hij]
keyword[else] :
identifier[amchar_map] = identifier[_amchar_map_hik]
identifier[amchar] =[]
keyword[for] identifier[a] keyword[in] identifier[am] :
keyword[if] identifier[a] < literal[int] :
keyword[raise] identifier[IndexError] ( literal[string] . identifier[format] ( identifier[a] ))
keyword[if] identifier[a] >= identifier[len] ( identifier[amchar_map] ):
keyword[raise] identifier[IndexError] ( literal[string] . identifier[format] ( identifier[a] , identifier[len] ( identifier[amchar_map] )))
identifier[amchar] . identifier[append] ( identifier[amchar_map] [ identifier[a] ])
keyword[return] literal[string] . identifier[join] ( identifier[amchar] ) | def amint_to_char(am, hij=False, use_L=False):
"""Convert an angular momentum integer to a character
The input is a list (to handle sp, spd, ... orbitals). The return
value is a string
For example, converts [0] to 's' and [0,1,2] to 'spd'
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j)
If use_L is True, sp shells ([0,1]) will return l instead
"""
if use_L and am == [0, 1]:
return 'l' # depends on [control=['if'], data=[]]
if hij:
amchar_map = _amchar_map_hij # depends on [control=['if'], data=[]]
else:
amchar_map = _amchar_map_hik
amchar = []
for a in am:
if a < 0:
raise IndexError('Angular momentum must be a non-negative integer (not {})'.format(a)) # depends on [control=['if'], data=['a']]
if a >= len(amchar_map):
raise IndexError('Angular momentum {} out of range. Must be less than {}'.format(a, len(amchar_map))) # depends on [control=['if'], data=['a']]
amchar.append(amchar_map[a]) # depends on [control=['for'], data=['a']]
return ''.join(amchar) |
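# Quick checks of the mapping, assuming the module-level _amchar_map_* tables
# follow the spdfghi... ordering the docstring describes.
print(amint_to_char([0]))                  # 's'
print(amint_to_char([0, 1, 2]))            # 'spd'
print(amint_to_char([0, 1], use_L=True))   # 'l' -- an sp shell collapses to l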
def teardown_app_request(self, func: Callable) -> Callable:
"""Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
...
"""
self.record_once(lambda state: state.app.teardown_request(func))
return func | def function[teardown_app_request, parameter[self, func]]:
constant[Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
...
]
call[name[self].record_once, parameter[<ast.Lambda object at 0x7da18f58f670>]]
return[name[func]] | keyword[def] identifier[teardown_app_request] ( identifier[self] , identifier[func] : identifier[Callable] )-> identifier[Callable] :
literal[string]
identifier[self] . identifier[record_once] ( keyword[lambda] identifier[state] : identifier[state] . identifier[app] . identifier[teardown_request] ( identifier[func] ))
keyword[return] identifier[func] | def teardown_app_request(self, func: Callable) -> Callable:
"""Add a teardown request function to the app.
This is designed to be used as a decorator, and has the same
arguments as :meth:`~quart.Quart.teardown_request`. It applies
to all requests to the app this blueprint is registered on. An
example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.teardown_app_request
def teardown():
...
"""
self.record_once(lambda state: state.app.teardown_request(func))
return func |
def dequote_docstring(text):
"""Remove the quotes delimiting a docstring."""
# TODO: Process escaped characters unless raw mode?
text = text.strip()
if len(text) > 6 and text[:3] == text[-3:] == '"""':
# Standard case, """..."""
return text[3:-3]
if len(text) > 7 and text[:4] in ('u"""', 'r"""') and text[-3:] == '"""':
# Unicode, u"""...""", or raw r"""..."""
return text[4:-3]
# Other flake8 tools will report atypical quotes:
if len(text) > 6 and text[:3] == text[-3:] == "'''":
return text[3:-3]
if len(text) > 7 and text[:4] in ("u'''", "r'''") and text[-3:] == "'''":
return text[4:-3]
if len(text) > 2 and text[0] == text[-1] == '"':
return text[1:-1]
if len(text) > 3 and text[:2] in ('u"', 'r"') and text[-1] == '"':
return text[2:-1]
if len(text) > 2 and text[0] == text[-1] == "'":
return text[1:-1]
if len(text) > 3 and text[:2] in ("u'", "r'") and text[-1] == "'":
return text[2:-1]
raise ValueError("Bad quotes!") | def function[dequote_docstring, parameter[text]]:
constant[Remove the quotes delimiting a docstring.]
variable[text] assign[=] call[name[text].strip, parameter[]]
if <ast.BoolOp object at 0x7da204346140> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da204347e50>]]
if <ast.BoolOp object at 0x7da204345780> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da204344e20>]]
if <ast.BoolOp object at 0x7da204347ee0> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da2043479a0>]]
if <ast.BoolOp object at 0x7da204345270> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da204344670>]]
if <ast.BoolOp object at 0x7da204346350> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da204344e50>]]
if <ast.BoolOp object at 0x7da2043448b0> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da204344370>]]
if <ast.BoolOp object at 0x7da204347460> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da2043458a0>]]
if <ast.BoolOp object at 0x7da204345510> begin[:]
return[call[name[text]][<ast.Slice object at 0x7da204347970>]]
<ast.Raise object at 0x7da204346ce0> | keyword[def] identifier[dequote_docstring] ( identifier[text] ):
literal[string]
identifier[text] = identifier[text] . identifier[strip] ()
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [: literal[int] ]== identifier[text] [- literal[int] :]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [: literal[int] ] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[text] [- literal[int] :]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [: literal[int] ]== identifier[text] [- literal[int] :]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [: literal[int] ] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[text] [- literal[int] :]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [ literal[int] ]== identifier[text] [- literal[int] ]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [: literal[int] ] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[text] [- literal[int] ]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [ literal[int] ]== identifier[text] [- literal[int] ]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[if] identifier[len] ( identifier[text] )> literal[int] keyword[and] identifier[text] [: literal[int] ] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[text] [- literal[int] ]== literal[string] :
keyword[return] identifier[text] [ literal[int] :- literal[int] ]
keyword[raise] identifier[ValueError] ( literal[string] ) | def dequote_docstring(text):
"""Remove the quotes delimiting a docstring."""
# TODO: Process escaped characters unless raw mode?
text = text.strip()
if len(text) > 6 and text[:3] == text[-3:] == '"""':
# Standard case, """..."""
return text[3:-3] # depends on [control=['if'], data=[]]
if len(text) > 7 and text[:4] in ('u"""', 'r"""') and (text[-3:] == '"""'):
# Unicode, u"""...""", or raw r"""..."""
return text[4:-3] # depends on [control=['if'], data=[]]
# Other flake8 tools will report atypical quotes:
if len(text) > 6 and text[:3] == text[-3:] == "'''":
return text[3:-3] # depends on [control=['if'], data=[]]
if len(text) > 7 and text[:4] in ("u'''", "r'''") and (text[-3:] == "'''"):
return text[4:-3] # depends on [control=['if'], data=[]]
if len(text) > 2 and text[0] == text[-1] == '"':
return text[1:-1] # depends on [control=['if'], data=[]]
if len(text) > 3 and text[:2] in ('u"', 'r"') and (text[-1] == '"'):
return text[2:-1] # depends on [control=['if'], data=[]]
if len(text) > 2 and text[0] == text[-1] == "'":
return text[1:-1] # depends on [control=['if'], data=[]]
if len(text) > 3 and text[:2] in ("u'", "r'") and (text[-1] == "'"):
return text[2:-1] # depends on [control=['if'], data=[]]
raise ValueError('Bad quotes!') |
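# Spot checks of the stripping rules; raw/unicode prefixes are removed together
# with the quotes they precede.
print(dequote_docstring('"""Docstring."""'))   # Docstring.
print(dequote_docstring("r'''raw text'''"))    # raw text
print(dequote_docstring('u"short"'))           # short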
def waypoint_request_send(self, seq):
'''wrapper for waypoint_request_send'''
if self.mavlink10():
self.mav.mission_request_send(self.target_system, self.target_component, seq)
else:
self.mav.waypoint_request_send(self.target_system, self.target_component, seq) | def function[waypoint_request_send, parameter[self, seq]]:
constant[wrapper for waypoint_request_send]
if call[name[self].mavlink10, parameter[]] begin[:]
call[name[self].mav.mission_request_send, parameter[name[self].target_system, name[self].target_component, name[seq]]] | keyword[def] identifier[waypoint_request_send] ( identifier[self] , identifier[seq] ):
literal[string]
keyword[if] identifier[self] . identifier[mavlink10] ():
identifier[self] . identifier[mav] . identifier[mission_request_send] ( identifier[self] . identifier[target_system] , identifier[self] . identifier[target_component] , identifier[seq] )
keyword[else] :
identifier[self] . identifier[mav] . identifier[waypoint_request_send] ( identifier[self] . identifier[target_system] , identifier[self] . identifier[target_component] , identifier[seq] ) | def waypoint_request_send(self, seq):
"""wrapper for waypoint_request_send"""
if self.mavlink10():
self.mav.mission_request_send(self.target_system, self.target_component, seq) # depends on [control=['if'], data=[]]
else:
self.mav.waypoint_request_send(self.target_system, self.target_component, seq) |
def text_justification(words, max_width):
'''
:type words: list
:type max_width: int
:rtype: list
'''
ret = [] # return value
row_len = 0 # current length of strs in a row
row_words = [] # current words in a row
index = 0 # the index of current word in words
is_first_word = True # is current word the first in a row
while index < len(words):
while row_len <= max_width and index < len(words):
if len(words[index]) > max_width:
raise ValueError("there exists word whose length is larger than max_width")
tmp = row_len
row_words.append(words[index])
tmp += len(words[index])
if not is_first_word:
tmp += 1 # except for the first word, each word should have at least a ' ' before it.
if tmp > max_width:
row_words.pop()
break
row_len = tmp
index += 1
is_first_word = False
# we now have a full row of words; pad with spaces so its length is exactly max_width.
row = ""
# if the row is the last
if index == len(words):
for word in row_words:
row += (word + ' ')
row = row[:-1]
row += ' ' * (max_width - len(row))
# not the last row and more than one word
elif len(row_words) != 1:
space_num = max_width - row_len
space_num_of_each_interval = space_num // (len(row_words) - 1)
space_num_rest = space_num - space_num_of_each_interval * (len(row_words) - 1)
for j in range(len(row_words)):
row += row_words[j]
if j != len(row_words) - 1:
row += ' ' * (1 + space_num_of_each_interval)
if space_num_rest > 0:
row += ' '
space_num_rest -= 1
# row with only one word
else:
row += row_words[0]
row += ' ' * (max_width - len(row))
ret.append(row)
# after a row , reset those value
row_len = 0
row_words = []
is_first_word = True
return ret | def function[text_justification, parameter[words, max_width]]:
constant[
:type words: list
:type max_width: int
:rtype: list
]
variable[ret] assign[=] list[[]]
variable[row_len] assign[=] constant[0]
variable[row_words] assign[=] list[[]]
variable[index] assign[=] constant[0]
variable[is_first_word] assign[=] constant[True]
while compare[name[index] less[<] call[name[len], parameter[name[words]]]] begin[:]
while <ast.BoolOp object at 0x7da1b209a980> begin[:]
if compare[call[name[len], parameter[call[name[words]][name[index]]]] greater[>] name[max_width]] begin[:]
<ast.Raise object at 0x7da1b209bac0>
variable[tmp] assign[=] name[row_len]
call[name[row_words].append, parameter[call[name[words]][name[index]]]]
<ast.AugAssign object at 0x7da1b20986d0>
if <ast.UnaryOp object at 0x7da1b20985b0> begin[:]
<ast.AugAssign object at 0x7da1b209bbb0>
if compare[name[tmp] greater[>] name[max_width]] begin[:]
call[name[row_words].pop, parameter[]]
break
variable[row_len] assign[=] name[tmp]
<ast.AugAssign object at 0x7da1b209b1f0>
variable[is_first_word] assign[=] constant[False]
variable[row] assign[=] constant[]
if compare[name[index] equal[==] call[name[len], parameter[name[words]]]] begin[:]
for taget[name[word]] in starred[name[row_words]] begin[:]
<ast.AugAssign object at 0x7da1b209bb80>
variable[row] assign[=] call[name[row]][<ast.Slice object at 0x7da1b2099810>]
<ast.AugAssign object at 0x7da1b2099f00>
call[name[ret].append, parameter[name[row]]]
variable[row_len] assign[=] constant[0]
variable[row_words] assign[=] list[[]]
variable[is_first_word] assign[=] constant[True]
return[name[ret]] | keyword[def] identifier[text_justification] ( identifier[words] , identifier[max_width] ):
literal[string]
identifier[ret] =[]
identifier[row_len] = literal[int]
identifier[row_words] =[]
identifier[index] = literal[int]
identifier[is_first_word] = keyword[True]
keyword[while] identifier[index] < identifier[len] ( identifier[words] ):
keyword[while] identifier[row_len] <= identifier[max_width] keyword[and] identifier[index] < identifier[len] ( identifier[words] ):
keyword[if] identifier[len] ( identifier[words] [ identifier[index] ])> identifier[max_width] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[tmp] = identifier[row_len]
identifier[row_words] . identifier[append] ( identifier[words] [ identifier[index] ])
identifier[tmp] += identifier[len] ( identifier[words] [ identifier[index] ])
keyword[if] keyword[not] identifier[is_first_word] :
identifier[tmp] += literal[int]
keyword[if] identifier[tmp] > identifier[max_width] :
identifier[row_words] . identifier[pop] ()
keyword[break]
identifier[row_len] = identifier[tmp]
identifier[index] += literal[int]
identifier[is_first_word] = keyword[False]
identifier[row] = literal[string]
keyword[if] identifier[index] == identifier[len] ( identifier[words] ):
keyword[for] identifier[word] keyword[in] identifier[row_words] :
identifier[row] +=( identifier[word] + literal[string] )
identifier[row] = identifier[row] [:- literal[int] ]
identifier[row] += literal[string] *( identifier[max_width] - identifier[len] ( identifier[row] ))
keyword[elif] identifier[len] ( identifier[row_words] )!= literal[int] :
identifier[space_num] = identifier[max_width] - identifier[row_len]
identifier[space_num_of_each_interval] = identifier[space_num] //( identifier[len] ( identifier[row_words] )- literal[int] )
identifier[space_num_rest] = identifier[space_num] - identifier[space_num_of_each_interval] *( identifier[len] ( identifier[row_words] )- literal[int] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[row_words] )):
identifier[row] += identifier[row_words] [ identifier[j] ]
keyword[if] identifier[j] != identifier[len] ( identifier[row_words] )- literal[int] :
identifier[row] += literal[string] *( literal[int] + identifier[space_num_of_each_interval] )
keyword[if] identifier[space_num_rest] > literal[int] :
identifier[row] += literal[string]
identifier[space_num_rest] -= literal[int]
keyword[else] :
identifier[row] += identifier[row_words] [ literal[int] ]
identifier[row] += literal[string] *( identifier[max_width] - identifier[len] ( identifier[row] ))
identifier[ret] . identifier[append] ( identifier[row] )
identifier[row_len] = literal[int]
identifier[row_words] =[]
identifier[is_first_word] = keyword[True]
keyword[return] identifier[ret] | def text_justification(words, max_width):
"""
:type words: list
:type max_width: int
:rtype: list
"""
ret = [] # return value
row_len = 0 # current length of strs in a row
row_words = [] # current words in a row
index = 0 # the index of current word in words
is_first_word = True # is current word the first in a row
while index < len(words):
while row_len <= max_width and index < len(words):
if len(words[index]) > max_width:
raise ValueError('a word is longer than max_width') # depends on [control=['if'], data=[]]
tmp = row_len
row_words.append(words[index])
tmp += len(words[index])
if not is_first_word:
tmp += 1 # except for the first word, each word should have at least a ' ' before it. # depends on [control=['if'], data=[]]
if tmp > max_width:
row_words.pop()
break # depends on [control=['if'], data=[]]
row_len = tmp
index += 1
is_first_word = False # depends on [control=['while'], data=[]]
# we now have a full row of words; pad with spaces so its length is exactly max_width.
row = ''
# if the row is the last
if index == len(words):
for word in row_words:
row += word + ' ' # depends on [control=['for'], data=['word']]
row = row[:-1]
row += ' ' * (max_width - len(row)) # depends on [control=['if'], data=[]]
# not the last row and more than one word
elif len(row_words) != 1:
space_num = max_width - row_len
space_num_of_each_interval = space_num // (len(row_words) - 1)
space_num_rest = space_num - space_num_of_each_interval * (len(row_words) - 1)
for j in range(len(row_words)):
row += row_words[j]
if j != len(row_words) - 1:
row += ' ' * (1 + space_num_of_each_interval) # depends on [control=['if'], data=[]]
if space_num_rest > 0:
row += ' '
space_num_rest -= 1 # depends on [control=['if'], data=['space_num_rest']] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]]
else:
# row with only one word
row += row_words[0]
row += ' ' * (max_width - len(row))
ret.append(row)
# after a row , reset those value
row_len = 0
row_words = []
is_first_word = True # depends on [control=['while'], data=['index']]
return ret |
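# Worked example of the greedy fill-then-pad strategy: interior rows spread the
# slack left to right, and the final row is left-justified.
words = ['This', 'is', 'an', 'example', 'of', 'text', 'justification.']
for row in text_justification(words, 16):
    print(repr(row))
# 'This    is    an'
# 'example  of text'
# 'justification.  '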
def collect_scalar_summands(cls, ops, kwargs):
"""Collect :class:`ValueScalar` and :class:`ScalarExpression` summands
Example:
>>> srepr(collect_scalar_summands(Scalar, (1, 2, 3), {}))
'ScalarValue(6)'
>>> collect_scalar_summands(Scalar, (1, 1, -1), {})
One
>>> collect_scalar_summands(Scalar, (1, -1), {})
Zero
>>> Psi = KetSymbol("Psi", hs=0)
>>> Phi = KetSymbol("Phi", hs=0)
>>> braket = BraKet.create(Psi, Phi)
>>> collect_scalar_summands(Scalar, (1, braket, -1), {})
<Psi|Phi>^(0)
>>> collect_scalar_summands(Scalar, (1, 2 * braket, 2, 2 * braket), {})
((3, 4 * <Psi|Phi>^(0)), {})
>>> collect_scalar_summands(Scalar, (2 * braket, -braket, -braket), {})
Zero
"""
# This routine is required because there is no
# "ScalarTimesQuantumExpression" for scalars: we have to extract
# coefficients from ScalarTimes instead
from qnet.algebra.core.scalar_algebra import (
Zero, One, Scalar, ScalarTimes, ScalarValue)
a_0 = Zero
coeff_map = OrderedDict()
for op in ops:
if isinstance(op, ScalarValue) or isinstance(op, Scalar._val_types):
a_0 += op
continue
elif isinstance(op, ScalarTimes):
if isinstance(op.operands[0], ScalarValue):
coeff = op.operands[0]
term = op.operands[1]
for sub_op in op.operands[2:]:
term *= sub_op
else:
coeff, term = One, op
else:
coeff, term = One, op
if term in coeff_map:
coeff_map[term] += coeff
else:
coeff_map[term] = coeff
if a_0 == Zero:
fops = []
else:
fops = [a_0]
for (term, coeff) in coeff_map.items():
op = coeff * term
if not op.is_zero:
fops.append(op)
if len(fops) == 0:
return cls._zero
elif len(fops) == 1:
return fops[0]
else:
return tuple(fops), kwargs | def function[collect_scalar_summands, parameter[cls, ops, kwargs]]:
constant[Collect :class:`ScalarValue` and :class:`ScalarExpression` summands
Example:
>>> srepr(collect_scalar_summands(Scalar, (1, 2, 3), {}))
'ScalarValue(6)'
>>> collect_scalar_summands(Scalar, (1, 1, -1), {})
One
>>> collect_scalar_summands(Scalar, (1, -1), {})
Zero
>>> Psi = KetSymbol("Psi", hs=0)
>>> Phi = KetSymbol("Phi", hs=0)
>>> braket = BraKet.create(Psi, Phi)
>>> collect_scalar_summands(Scalar, (1, braket, -1), {})
<Psi|Phi>^(0)
>>> collect_scalar_summands(Scalar, (1, 2 * braket, 2, 2 * braket), {})
((3, 4 * <Psi|Phi>^(0)), {})
>>> collect_scalar_summands(Scalar, (2 * braket, -braket, -braket), {})
Zero
]
from relative_module[qnet.algebra.core.scalar_algebra] import module[Zero], module[One], module[Scalar], module[ScalarTimes], module[ScalarValue]
variable[a_0] assign[=] name[Zero]
variable[coeff_map] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[op]] in starred[name[ops]] begin[:]
if <ast.BoolOp object at 0x7da1b27b8d00> begin[:]
<ast.AugAssign object at 0x7da1b27b8c40>
continue
if compare[name[term] in name[coeff_map]] begin[:]
<ast.AugAssign object at 0x7da1b27b9e70>
if compare[name[a_0] equal[==] name[Zero]] begin[:]
variable[fops] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b27ba4d0>, <ast.Name object at 0x7da1b27ba320>]]] in starred[call[name[coeff_map].items, parameter[]]] begin[:]
variable[op] assign[=] binary_operation[name[coeff] * name[term]]
if <ast.UnaryOp object at 0x7da1b27b9ae0> begin[:]
call[name[fops].append, parameter[name[op]]]
if compare[call[name[len], parameter[name[fops]]] equal[==] constant[0]] begin[:]
return[name[cls]._zero] | keyword[def] identifier[collect_scalar_summands] ( identifier[cls] , identifier[ops] , identifier[kwargs] ):
literal[string]
keyword[from] identifier[qnet] . identifier[algebra] . identifier[core] . identifier[scalar_algebra] keyword[import] (
identifier[Zero] , identifier[One] , identifier[Scalar] , identifier[ScalarTimes] , identifier[ScalarValue] )
identifier[a_0] = identifier[Zero]
identifier[coeff_map] = identifier[OrderedDict] ()
keyword[for] identifier[op] keyword[in] identifier[ops] :
keyword[if] identifier[isinstance] ( identifier[op] , identifier[ScalarValue] ) keyword[or] identifier[isinstance] ( identifier[op] , identifier[Scalar] . identifier[_val_types] ):
identifier[a_0] += identifier[op]
keyword[continue]
keyword[elif] identifier[isinstance] ( identifier[op] , identifier[ScalarTimes] ):
keyword[if] identifier[isinstance] ( identifier[op] . identifier[operands] [ literal[int] ], identifier[ScalarValue] ):
identifier[coeff] = identifier[op] . identifier[operands] [ literal[int] ]
identifier[term] = identifier[op] . identifier[operands] [ literal[int] ]
keyword[for] identifier[sub_op] keyword[in] identifier[op] . identifier[operands] [ literal[int] :]:
identifier[term] *= identifier[sub_op]
keyword[else] :
identifier[coeff] , identifier[term] = identifier[One] , identifier[op]
keyword[else] :
identifier[coeff] , identifier[term] = identifier[One] , identifier[op]
keyword[if] identifier[term] keyword[in] identifier[coeff_map] :
identifier[coeff_map] [ identifier[term] ]+= identifier[coeff]
keyword[else] :
identifier[coeff_map] [ identifier[term] ]= identifier[coeff]
keyword[if] identifier[a_0] == identifier[Zero] :
identifier[fops] =[]
keyword[else] :
identifier[fops] =[ identifier[a_0] ]
keyword[for] ( identifier[term] , identifier[coeff] ) keyword[in] identifier[coeff_map] . identifier[items] ():
identifier[op] = identifier[coeff] * identifier[term]
keyword[if] keyword[not] identifier[op] . identifier[is_zero] :
identifier[fops] . identifier[append] ( identifier[op] )
keyword[if] identifier[len] ( identifier[fops] )== literal[int] :
keyword[return] identifier[cls] . identifier[_zero]
keyword[elif] identifier[len] ( identifier[fops] )== literal[int] :
keyword[return] identifier[fops] [ literal[int] ]
keyword[else] :
keyword[return] identifier[tuple] ( identifier[fops] ), identifier[kwargs] | def collect_scalar_summands(cls, ops, kwargs):
"""Collect :class:`ValueScalar` and :class:`ScalarExpression` summands
Example:
>>> srepr(collect_scalar_summands(Scalar, (1, 2, 3), {}))
'ScalarValue(6)'
>>> collect_scalar_summands(Scalar, (1, 1, -1), {})
One
>>> collect_scalar_summands(Scalar, (1, -1), {})
Zero
>>> Psi = KetSymbol("Psi", hs=0)
>>> Phi = KetSymbol("Phi", hs=0)
>>> braket = BraKet.create(Psi, Phi)
>>> collect_scalar_summands(Scalar, (1, braket, -1), {})
<Psi|Phi>^(0)
>>> collect_scalar_summands(Scalar, (1, 2 * braket, 2, 2 * braket), {})
((3, 4 * <Psi|Phi>^(0)), {})
>>> collect_scalar_summands(Scalar, (2 * braket, -braket, -braket), {})
Zero
"""
# This routine is required because there is no
# "ScalarTimesQuantumExpression" for scalars: we have to extract
# coefficients from ScalarTimes instead
from qnet.algebra.core.scalar_algebra import Zero, One, Scalar, ScalarTimes, ScalarValue
a_0 = Zero
coeff_map = OrderedDict()
for op in ops:
if isinstance(op, ScalarValue) or isinstance(op, Scalar._val_types):
a_0 += op
continue # depends on [control=['if'], data=[]]
elif isinstance(op, ScalarTimes):
if isinstance(op.operands[0], ScalarValue):
coeff = op.operands[0]
term = op.operands[1]
for sub_op in op.operands[2:]:
term *= sub_op # depends on [control=['for'], data=['sub_op']] # depends on [control=['if'], data=[]]
else:
(coeff, term) = (One, op) # depends on [control=['if'], data=[]]
else:
(coeff, term) = (One, op)
if term in coeff_map:
coeff_map[term] += coeff # depends on [control=['if'], data=['term', 'coeff_map']]
else:
coeff_map[term] = coeff # depends on [control=['for'], data=['op']]
if a_0 == Zero:
fops = [] # depends on [control=['if'], data=[]]
else:
fops = [a_0]
for (term, coeff) in coeff_map.items():
op = coeff * term
if not op.is_zero:
fops.append(op) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(fops) == 0:
return cls._zero # depends on [control=['if'], data=[]]
elif len(fops) == 1:
return fops[0] # depends on [control=['if'], data=[]]
else:
return (tuple(fops), kwargs) |
def delete_load_balancer(access_token, subscription_id, resource_group, lb_name):
'''Delete a load balancer.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the load balancer.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/loadBalancers/', lb_name,
'?api-version=', NETWORK_API])
return do_delete(endpoint, access_token) | def function[delete_load_balancer, parameter[access_token, subscription_id, resource_group, lb_name]]:
constant[Delete a load balancer.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the load balancer.
Returns:
HTTP response.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b05d96f0>, <ast.Constant object at 0x7da1b05d9090>, <ast.Name object at 0x7da1b05db490>, <ast.Constant object at 0x7da1b05d8640>, <ast.Name object at 0x7da1b05d94b0>, <ast.Constant object at 0x7da1b05d90c0>, <ast.Name object at 0x7da1b05db940>, <ast.Constant object at 0x7da1b05d9e70>, <ast.Name object at 0x7da1b05dae00>]]]]
return[call[name[do_delete], parameter[name[endpoint], name[access_token]]]] | keyword[def] identifier[delete_load_balancer] ( identifier[access_token] , identifier[subscription_id] , identifier[resource_group] , identifier[lb_name] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] , identifier[resource_group] ,
literal[string] , identifier[lb_name] ,
literal[string] , identifier[NETWORK_API] ])
keyword[return] identifier[do_delete] ( identifier[endpoint] , identifier[access_token] ) | def delete_load_balancer(access_token, subscription_id, resource_group, lb_name):
"""Delete a load balancer.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the load balancer.
Returns:
HTTP response.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, '?api-version=', NETWORK_API])
return do_delete(endpoint, access_token) |
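# Hypothetical call sketch for delete_load_balancer above; the access token,
# ids and names are placeholders, not part of the original module.
def remove_example_lb(access_token):
    response = delete_load_balancer(
        access_token,
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group="my-resource-group",
        lb_name="my-load-balancer")
    # Azure typically answers 200/202 for an accepted delete, 204 if absent.
    return response.status_code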
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install) | def function[make_abstract_dist, parameter[req_to_install]]:
constant[Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
]
if name[req_to_install].editable begin[:]
return[call[name[IsSDist], parameter[name[req_to_install]]]] | keyword[def] identifier[make_abstract_dist] ( identifier[req_to_install] ):
literal[string]
keyword[if] identifier[req_to_install] . identifier[editable] :
keyword[return] identifier[IsSDist] ( identifier[req_to_install] )
keyword[elif] identifier[req_to_install] . identifier[link] keyword[and] identifier[req_to_install] . identifier[link] . identifier[is_wheel] :
keyword[return] identifier[IsWheel] ( identifier[req_to_install] )
keyword[else] :
keyword[return] identifier[IsSDist] ( identifier[req_to_install] ) | def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install) # depends on [control=['if'], data=[]]
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install) # depends on [control=['if'], data=[]]
else:
return IsSDist(req_to_install) |
def _set_debug_level(self, debug_level):
"""
:type debug_level: int, between 0-2
:param debug_level: configure verbosity of log
"""
mapping = {
0: logging.ERROR,
1: logging.INFO,
2: logging.DEBUG,
}
self.setLevel(
mapping[min(debug_level, 2)],
) | def function[_set_debug_level, parameter[self, debug_level]]:
constant[
:type debug_level: int, between 0-2
:param debug_level: configure verbosity of log
]
variable[mapping] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e63b0>, <ast.Constant object at 0x7da20c6e73a0>, <ast.Constant object at 0x7da2044c2d10>], [<ast.Attribute object at 0x7da2044c0b50>, <ast.Attribute object at 0x7da2044c17b0>, <ast.Attribute object at 0x7da2044c3d60>]]
call[name[self].setLevel, parameter[call[name[mapping]][call[name[min], parameter[name[debug_level], constant[2]]]]]] | keyword[def] identifier[_set_debug_level] ( identifier[self] , identifier[debug_level] ):
literal[string]
identifier[mapping] ={
literal[int] : identifier[logging] . identifier[ERROR] ,
literal[int] : identifier[logging] . identifier[INFO] ,
literal[int] : identifier[logging] . identifier[DEBUG] ,
}
identifier[self] . identifier[setLevel] (
identifier[mapping] [ identifier[min] ( identifier[debug_level] , literal[int] )],
) | def _set_debug_level(self, debug_level):
"""
:type debug_level: int, between 0-2
:param debug_level: configure verbosity of log
"""
mapping = {0: logging.ERROR, 1: logging.INFO, 2: logging.DEBUG}
self.setLevel(mapping[min(debug_level, 2)]) |
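# Quick demonstration of the clamping in _set_debug_level: levels above 2 are
# treated as 2 (logging.DEBUG). The demo logger class here is an assumption.
import logging

class _DemoLogger(logging.Logger):
    _set_debug_level = _set_debug_level  # reuse the function defined above

demo = _DemoLogger("demo")
demo._set_debug_level(5)                 # clamped to 2
assert demo.level == logging.DEBUG
demo._set_debug_level(0)
assert demo.level == logging.ERROR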
def virtualenv_exists(self, virtualenv_dir=None):
"""
Returns true if the virtual environment has been created.
"""
r = self.local_renderer
ret = True
with self.settings(warn_only=True):
ret = r.run_or_local('ls {virtualenv_dir}') or ''
ret = 'cannot access' not in ret.strip().lower()
if self.verbose:
if ret:
print('Yes')
else:
print('No')
return ret | def function[virtualenv_exists, parameter[self, virtualenv_dir]]:
constant[
Returns true if the virtual environment has been created.
]
variable[r] assign[=] name[self].local_renderer
variable[ret] assign[=] constant[True]
with call[name[self].settings, parameter[]] begin[:]
variable[ret] assign[=] <ast.BoolOp object at 0x7da1b00d8340>
variable[ret] assign[=] compare[constant[cannot access] <ast.NotIn object at 0x7da2590d7190> call[call[name[ret].strip, parameter[]].lower, parameter[]]]
if name[self].verbose begin[:]
if name[ret] begin[:]
call[name[print], parameter[constant[Yes]]]
return[name[ret]] | keyword[def] identifier[virtualenv_exists] ( identifier[self] , identifier[virtualenv_dir] = keyword[None] ):
literal[string]
identifier[r] = identifier[self] . identifier[local_renderer]
identifier[ret] = keyword[True]
keyword[with] identifier[self] . identifier[settings] ( identifier[warn_only] = keyword[True] ):
identifier[ret] = identifier[r] . identifier[run_or_local] ( literal[string] ) keyword[or] literal[string]
identifier[ret] = literal[string] keyword[not] keyword[in] identifier[ret] . identifier[strip] (). identifier[lower] ()
keyword[if] identifier[self] . identifier[verbose] :
keyword[if] identifier[ret] :
identifier[print] ( literal[string] )
keyword[else] :
identifier[print] ( literal[string] )
keyword[return] identifier[ret] | def virtualenv_exists(self, virtualenv_dir=None):
"""
Returns true if the virtual environment has been created.
"""
r = self.local_renderer
ret = True
with self.settings(warn_only=True):
ret = r.run_or_local('ls {virtualenv_dir}') or ''
ret = 'cannot access' not in ret.strip().lower() # depends on [control=['with'], data=[]]
if self.verbose:
if ret:
print('Yes') # depends on [control=['if'], data=[]]
else:
print('No') # depends on [control=['if'], data=[]]
return ret |
def distinct(self, selector=identity):
'''Eliminate duplicate elements from a sequence.
Note: This method uses deferred execution.
Args:
selector: An optional single argument function the result of which
is the value compared for uniqueness against elements already
consumed. If omitted, the element value itself is compared for
uniqueness.
Returns:
Unique elements of the source sequence as determined by the
selector function. Note that it is unprojected elements that are
returned, even if a selector was provided.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the selector is not callable.
'''
if self.closed():
raise ValueError("Attempt to call distinct() on a "
"closed Queryable.")
if not is_callable(selector):
raise TypeError("distinct() parameter selector={0} is "
"not callable".format(repr(selector)))
return self._create(self._generate_distinct_result(selector)) | def function[distinct, parameter[self, selector]]:
constant[Eliminate duplicate elements from a sequence.
Note: This method uses deferred execution.
Args:
selector: An optional single argument function the result of which
is the value compared for uniqueness against elements already
consumed. If omitted, the element value itself is compared for
uniqueness.
Returns:
Unique elements of the source sequence as determined by the
selector function. Note that it is unprojected elements that are
returned, even if a selector was provided.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the selector is not callable.
]
if call[name[self].closed, parameter[]] begin[:]
<ast.Raise object at 0x7da1b1a1ebf0>
if <ast.UnaryOp object at 0x7da1b1a1c6a0> begin[:]
<ast.Raise object at 0x7da1b1a1d570>
return[call[name[self]._create, parameter[call[name[self]._generate_distinct_result, parameter[name[selector]]]]]] | keyword[def] identifier[distinct] ( identifier[self] , identifier[selector] = identifier[identity] ):
literal[string]
keyword[if] identifier[self] . identifier[closed] ():
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] keyword[not] identifier[is_callable] ( identifier[selector] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] . identifier[format] ( identifier[repr] ( identifier[selector] )))
keyword[return] identifier[self] . identifier[_create] ( identifier[self] . identifier[_generate_distinct_result] ( identifier[selector] )) | def distinct(self, selector=identity):
"""Eliminate duplicate elements from a sequence.
Note: This method uses deferred execution.
Args:
selector: An optional single argument function the result of which
is the value compared for uniqueness against elements already
consumed. If omitted, the element value itself is compared for
uniqueness.
Returns:
Unique elements of the source sequence as determined by the
selector function. Note that it is unprojected elements that are
returned, even if a selector was provided.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the selector is not callable.
"""
if self.closed():
raise ValueError('Attempt to call distinct() on a closed Queryable.') # depends on [control=['if'], data=[]]
if not is_callable(selector):
raise TypeError('distinct() parameter selector={0} is not callable'.format(repr(selector))) # depends on [control=['if'], data=[]]
return self._create(self._generate_distinct_result(selector)) |
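# Hedged usage sketch for distinct(); this assumes the asq-style Queryable API
# (query() over an iterable, to_list() to realise the deferred sequence).
from asq import query  # assumed import path

assert query([1, 2, 2, 3, 1]).distinct().to_list() == [1, 2, 3]
# With a selector, uniqueness is decided on the projection while the original
# (unprojected) elements are returned:
assert query(["a", "A", "b"]).distinct(str.lower).to_list() == ["a", "b"]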
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt | def function[view_similarity_matrix, parameter[self, data, labels, figsize, filename]]:
constant[Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
]
if <ast.UnaryOp object at 0x7da1b0b18580> begin[:]
<ast.Raise object at 0x7da1b0b1b8b0>
if <ast.BoolOp object at 0x7da1b0b19780> begin[:]
call[name[self].get_surface_state, parameter[]]
if compare[name[data] is constant[None]] begin[:]
variable[X] assign[=] name[self].activation_map
variable[corrmat] assign[=] binary_operation[constant[1] - call[name[pairwise_distances], parameter[name[X]]]]
if compare[name[figsize] is constant[None]] begin[:]
variable[figsize] assign[=] tuple[[<ast.Constant object at 0x7da1b0b18ee0>, <ast.Constant object at 0x7da1b0b1bc40>]]
<ast.Tuple object at 0x7da1b0b1a980> assign[=] call[name[plt].subplots, parameter[]]
if compare[name[labels] is constant[None]] begin[:]
variable[xticklabels] assign[=] list[[]]
variable[yticklabels] assign[=] list[[]]
call[name[sns].heatmap, parameter[name[corrmat]]]
call[name[f].tight_layout, parameter[]]
call[name[plt].yticks, parameter[]]
call[name[plt].xticks, parameter[]]
call[name[ax].set_yticklabels, parameter[name[yticklabels]]]
call[name[ax].set_xticklabels, parameter[name[xticklabels]]]
if compare[name[filename] is_not constant[None]] begin[:]
call[name[plt].savefig, parameter[name[filename]]]
return[name[plt]] | keyword[def] identifier[view_similarity_matrix] ( identifier[self] , identifier[data] = keyword[None] , identifier[labels] = keyword[None] , identifier[figsize] = keyword[None] ,
identifier[filename] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[have_heatmap] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[data] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[activation_map] keyword[is] keyword[None] :
identifier[self] . identifier[get_surface_state] ()
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[X] = identifier[self] . identifier[activation_map]
keyword[else] :
identifier[X] = identifier[data]
identifier[corrmat] = literal[int] - identifier[pairwise_distances] ( identifier[X] , identifier[metric] = literal[string] )
keyword[if] identifier[figsize] keyword[is] keyword[None] :
identifier[figsize] =( literal[int] , literal[int] )
identifier[f] , identifier[ax] = identifier[plt] . identifier[subplots] ( identifier[figsize] = identifier[figsize] )
keyword[if] identifier[labels] keyword[is] keyword[None] :
identifier[xticklabels] =[]
identifier[yticklabels] =[]
keyword[else] :
identifier[xticklabels] = identifier[labels]
identifier[yticklabels] = identifier[labels]
identifier[sns] . identifier[heatmap] ( identifier[corrmat] , identifier[vmax] = literal[int] , identifier[vmin] =- literal[int] , identifier[square] = keyword[True] ,
identifier[xticklabels] = identifier[xticklabels] , identifier[yticklabels] = identifier[yticklabels] ,
identifier[cmap] = literal[string] , identifier[center] = literal[int] )
identifier[f] . identifier[tight_layout] ()
identifier[plt] . identifier[yticks] ( identifier[rotation] = literal[int] )
identifier[plt] . identifier[xticks] ( identifier[rotation] = literal[int] )
identifier[ax] . identifier[set_yticklabels] ( identifier[yticklabels] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[size] = literal[int] )
identifier[ax] . identifier[set_xticklabels] ( identifier[xticklabels] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[size] = literal[int] )
keyword[if] identifier[filename] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[savefig] ( identifier[filename] , identifier[bbox_inches] = literal[string] )
keyword[else] :
identifier[plt] . identifier[show] ()
keyword[return] identifier[plt] | def view_similarity_matrix(self, data=None, labels=None, figsize=None, filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception('Import dependencies missing for viewing similarity matrix. You must have seaborn and scikit-learn') # depends on [control=['if'], data=[]]
if data is None and self.activation_map is None:
self.get_surface_state() # depends on [control=['if'], data=[]]
if data is None:
X = self.activation_map # depends on [control=['if'], data=[]]
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric='correlation')
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9) # depends on [control=['if'], data=['figsize']]
(f, ax) = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = [] # depends on [control=['if'], data=[]]
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True, xticklabels=xticklabels, yticklabels=yticklabels, cmap='RdBu_r', center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight') # depends on [control=['if'], data=['filename']]
else:
plt.show()
return plt |
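# Hypothetical call sketch for view_similarity_matrix; `som` stands for an
# already-trained instance of the surrounding class.
# import numpy as np
# data = np.random.rand(20, 8)                 # 20 samples, 8 features
# labels = ["p%02d" % i for i in range(20)]
# som.view_similarity_matrix(data=data, labels=labels,
#                            figsize=(10, 8), filename="similarity.png")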
def _golbub_welsch(orders, coeff1, coeff2):
"""Recurrence coefficients to abscisas and weights."""
abscisas, weights = [], []
for dim, order in enumerate(orders):
if order:
bands = numpy.zeros((2, order))
bands[0] = coeff1[dim, :order]
bands[1, :-1] = numpy.sqrt(coeff2[dim, 1:order])
vals, vecs = scipy.linalg.eig_banded(bands, lower=True)
abscisa, weight = vals.real, vecs[0, :]**2
indices = numpy.argsort(abscisa)
abscisa, weight = abscisa[indices], weight[indices]
else:
abscisa, weight = numpy.array([coeff1[dim, 0]]), numpy.array([1.])
abscisas.append(abscisa)
weights.append(weight)
return abscisas, weights | def function[_golbub_welsch, parameter[orders, coeff1, coeff2]]:
constant[Convert recurrence coefficients to abscissas and weights (Golub-Welsch).]
<ast.Tuple object at 0x7da204345960> assign[=] tuple[[<ast.List object at 0x7da204346980>, <ast.List object at 0x7da204346cb0>]]
for taget[tuple[[<ast.Name object at 0x7da204344d30>, <ast.Name object at 0x7da204344580>]]] in starred[call[name[enumerate], parameter[name[orders]]]] begin[:]
if name[order] begin[:]
variable[bands] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Constant object at 0x7da204346590>, <ast.Name object at 0x7da204347580>]]]]
call[name[bands]][constant[0]] assign[=] call[name[coeff1]][tuple[[<ast.Name object at 0x7da204344490>, <ast.Slice object at 0x7da2043458a0>]]]
call[name[bands]][tuple[[<ast.Constant object at 0x7da204345180>, <ast.Slice object at 0x7da204346e00>]]] assign[=] call[name[numpy].sqrt, parameter[call[name[coeff2]][tuple[[<ast.Name object at 0x7da204346920>, <ast.Slice object at 0x7da204347d00>]]]]]
<ast.Tuple object at 0x7da204347370> assign[=] call[name[scipy].linalg.eig_banded, parameter[name[bands]]]
<ast.Tuple object at 0x7da204347dc0> assign[=] tuple[[<ast.Attribute object at 0x7da2043456f0>, <ast.BinOp object at 0x7da204347ac0>]]
variable[indices] assign[=] call[name[numpy].argsort, parameter[name[abscisa]]]
<ast.Tuple object at 0x7da204347d90> assign[=] tuple[[<ast.Subscript object at 0x7da204345d50>, <ast.Subscript object at 0x7da2043464d0>]]
call[name[abscisas].append, parameter[name[abscisa]]]
call[name[weights].append, parameter[name[weight]]]
return[tuple[[<ast.Name object at 0x7da2044c2650>, <ast.Name object at 0x7da2044c1570>]]] | keyword[def] identifier[_golbub_welsch] ( identifier[orders] , identifier[coeff1] , identifier[coeff2] ):
literal[string]
identifier[abscisas] , identifier[weights] =[],[]
keyword[for] identifier[dim] , identifier[order] keyword[in] identifier[enumerate] ( identifier[orders] ):
keyword[if] identifier[order] :
identifier[bands] = identifier[numpy] . identifier[zeros] (( literal[int] , identifier[order] ))
identifier[bands] [ literal[int] ]= identifier[coeff1] [ identifier[dim] ,: identifier[order] ]
identifier[bands] [ literal[int] ,:- literal[int] ]= identifier[numpy] . identifier[sqrt] ( identifier[coeff2] [ identifier[dim] , literal[int] : identifier[order] ])
identifier[vals] , identifier[vecs] = identifier[scipy] . identifier[linalg] . identifier[eig_banded] ( identifier[bands] , identifier[lower] = keyword[True] )
identifier[abscisa] , identifier[weight] = identifier[vals] . identifier[real] , identifier[vecs] [ literal[int] ,:]** literal[int]
identifier[indices] = identifier[numpy] . identifier[argsort] ( identifier[abscisa] )
identifier[abscisa] , identifier[weight] = identifier[abscisa] [ identifier[indices] ], identifier[weight] [ identifier[indices] ]
keyword[else] :
identifier[abscisa] , identifier[weight] = identifier[numpy] . identifier[array] ([ identifier[coeff1] [ identifier[dim] , literal[int] ]]), identifier[numpy] . identifier[array] ([ literal[int] ])
identifier[abscisas] . identifier[append] ( identifier[abscisa] )
identifier[weights] . identifier[append] ( identifier[weight] )
keyword[return] identifier[abscisas] , identifier[weights] | def _golbub_welsch(orders, coeff1, coeff2):
"""Recurrence coefficients to abscisas and weights."""
(abscisas, weights) = ([], [])
for (dim, order) in enumerate(orders):
if order:
bands = numpy.zeros((2, order))
bands[0] = coeff1[dim, :order]
bands[1, :-1] = numpy.sqrt(coeff2[dim, 1:order])
(vals, vecs) = scipy.linalg.eig_banded(bands, lower=True)
(abscisa, weight) = (vals.real, vecs[0, :] ** 2)
indices = numpy.argsort(abscisa)
(abscisa, weight) = (abscisa[indices], weight[indices]) # depends on [control=['if'], data=[]]
else:
(abscisa, weight) = (numpy.array([coeff1[dim, 0]]), numpy.array([1.0]))
abscisas.append(abscisa)
weights.append(weight) # depends on [control=['for'], data=[]]
return (abscisas, weights) |
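# Worked check of _golbub_welsch using the Legendre recurrence on [-1, 1]
# (alpha_k = 0, beta_k = k**2 / (4k**2 - 1)); the three-point rule has
# abscissas 0 and +/- sqrt(3/5) with normalised weights 5/18, 8/18, 5/18.
import numpy

order = 3
ks = numpy.arange(order)
coeff1 = numpy.zeros((1, order))                      # alpha coefficients
coeff2 = numpy.zeros((1, order))                      # beta coefficients
coeff2[0, 1:] = ks[1:]**2 / (4.0 * ks[1:]**2 - 1.0)   # beta_0 is unused
abscisas, weights = _golbub_welsch([order], coeff1, coeff2)
assert numpy.allclose(abscisas[0], [-0.6**0.5, 0.0, 0.6**0.5])
assert numpy.allclose(weights[0], [5.0 / 18, 8.0 / 18, 5.0 / 18])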
def calculate_logevidence(cls, filename, thin_start=None, thin_end=None,
thin_interval=None):
"""Calculates the log evidence from the given file using ``emcee_pt``'s
thermodynamic integration.
Parameters
----------
filename : str
Name of the file to read the samples from. Should be an
``EmceePTFile``.
thin_start : int
Index of the sample to begin returning stats. Default is to read
stats after burn in. To start from the beginning set thin_start
to 0.
thin_interval : int
Interval to accept every i-th sample. Default is to use the
`fp.acl`. If `fp.acl` is not set, then use all stats
(set thin_interval to 1).
thin_end : int
Index of the last sample to read. If not given then
`fp.niterations` is used.
Returns
-------
lnZ : float
The estimate of log of the evidence.
dlnZ : float
The error on the estimate.
"""
with cls._io(filename, 'r') as fp:
logls = fp.read_raw_samples(['loglikelihood'],
thin_start=thin_start,
thin_interval=thin_interval,
thin_end=thin_end,
temps='all', flatten=False)
logls = logls['loglikelihood']
# we need the betas that were used
betas = fp.betas
# annoyingly, thermodynamic integration in PTSampler is an instance
# method, so we'll implement a dummy one
ntemps = fp.ntemps
nwalkers = fp.nwalkers
ndim = len(fp.variable_params)
dummy_sampler = emcee.PTSampler(ntemps, nwalkers, ndim, None,
None, betas=betas)
return dummy_sampler.thermodynamic_integration_log_evidence(
logls=logls, fburnin=0.) | def function[calculate_logevidence, parameter[cls, filename, thin_start, thin_end, thin_interval]]:
constant[Calculates the log evidence from the given file using ``emcee_pt``'s
thermodynamic integration.
Parameters
----------
filename : str
Name of the file to read the samples from. Should be an
``EmceePTFile``.
thin_start : int
Index of the sample to begin returning stats. Default is to read
stats after burn in. To start from the beginning set thin_start
to 0.
thin_interval : int
Interval to accept every i-th sample. Default is to use the
`fp.acl`. If `fp.acl` is not set, then use all stats
(set thin_interval to 1).
thin_end : int
Index of the last sample to read. If not given then
`fp.niterations` is used.
Returns
-------
lnZ : float
The estimate of log of the evidence.
dlnZ : float
The error on the estimate.
]
with call[name[cls]._io, parameter[name[filename], constant[r]]] begin[:]
variable[logls] assign[=] call[name[fp].read_raw_samples, parameter[list[[<ast.Constant object at 0x7da2041d90c0>]]]]
variable[logls] assign[=] call[name[logls]][constant[loglikelihood]]
variable[betas] assign[=] name[fp].betas
variable[ntemps] assign[=] name[fp].ntemps
variable[nwalkers] assign[=] name[fp].nwalkers
variable[ndim] assign[=] call[name[len], parameter[name[fp].variable_params]]
variable[dummy_sampler] assign[=] call[name[emcee].PTSampler, parameter[name[ntemps], name[nwalkers], name[ndim], constant[None], constant[None]]]
return[call[name[dummy_sampler].thermodynamic_integration_log_evidence, parameter[]]] | keyword[def] identifier[calculate_logevidence] ( identifier[cls] , identifier[filename] , identifier[thin_start] = keyword[None] , identifier[thin_end] = keyword[None] ,
identifier[thin_interval] = keyword[None] ):
literal[string]
keyword[with] identifier[cls] . identifier[_io] ( identifier[filename] , literal[string] ) keyword[as] identifier[fp] :
identifier[logls] = identifier[fp] . identifier[read_raw_samples] ([ literal[string] ],
identifier[thin_start] = identifier[thin_start] ,
identifier[thin_interval] = identifier[thin_interval] ,
identifier[thin_end] = identifier[thin_end] ,
identifier[temps] = literal[string] , identifier[flatten] = keyword[False] )
identifier[logls] = identifier[logls] [ literal[string] ]
identifier[betas] = identifier[fp] . identifier[betas]
identifier[ntemps] = identifier[fp] . identifier[ntemps]
identifier[nwalkers] = identifier[fp] . identifier[nwalkers]
identifier[ndim] = identifier[len] ( identifier[fp] . identifier[variable_params] )
identifier[dummy_sampler] = identifier[emcee] . identifier[PTSampler] ( identifier[ntemps] , identifier[nwalkers] , identifier[ndim] , keyword[None] ,
keyword[None] , identifier[betas] = identifier[betas] )
keyword[return] identifier[dummy_sampler] . identifier[thermodynamic_integration_log_evidence] (
identifier[logls] = identifier[logls] , identifier[fburnin] = literal[int] ) | def calculate_logevidence(cls, filename, thin_start=None, thin_end=None, thin_interval=None):
"""Calculates the log evidence from the given file using ``emcee_pt``'s
thermodynamic integration.
Parameters
----------
filename : str
Name of the file to read the samples from. Should be an
``EmceePTFile``.
thin_start : int
Index of the sample to begin returning stats. Default is to read
stats after burn in. To start from the beginning set thin_start
to 0.
thin_interval : int
Interval to accept every i-th sample. Default is to use the
`fp.acl`. If `fp.acl` is not set, then use all stats
(set thin_interval to 1).
thin_end : int
Index of the last sample to read. If not given then
`fp.niterations` is used.
Returns
-------
lnZ : float
The estimate of log of the evidence.
dlnZ : float
The error on the estimate.
"""
with cls._io(filename, 'r') as fp:
logls = fp.read_raw_samples(['loglikelihood'], thin_start=thin_start, thin_interval=thin_interval, thin_end=thin_end, temps='all', flatten=False)
logls = logls['loglikelihood']
# we need the betas that were used
betas = fp.betas
# annoyingly, thermodynamic integration in PTSampler is an instance
# method, so we'll implement a dummy one
ntemps = fp.ntemps
nwalkers = fp.nwalkers
ndim = len(fp.variable_params) # depends on [control=['with'], data=['fp']]
dummy_sampler = emcee.PTSampler(ntemps, nwalkers, ndim, None, None, betas=betas)
return dummy_sampler.thermodynamic_integration_log_evidence(logls=logls, fburnin=0.0) |
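# Hypothetical invocation of calculate_logevidence; the checkpoint filename is
# a placeholder and EmceePTSampler stands for the surrounding sampler class.
# lnZ, dlnZ = EmceePTSampler.calculate_logevidence(
#     "samples.hdf", thin_start=0, thin_interval=1)
# print("log evidence = %.3f +/- %.3f" % (lnZ, dlnZ))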
def getHelpAsString(docstring=False, show_ver=True):
"""
Return useful help from a file in the script directory called
``__taskname__.help``
"""
install_dir = os.path.dirname(__file__)
taskname = util.base_taskname(__taskname__, __package__)
htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')
helpfile = os.path.join(install_dir, taskname + '.help')
if docstring or (not docstring and not os.path.exists(htmlfile)):
if show_ver:
helpString = "\n{:s} Version {:s} updated on {:s}\n\n".format(
__taskname__, __version__, __version_date__
)
else:
helpString = ''
if os.path.exists(helpfile):
helpString += teal.getHelpFileAsString(taskname, __file__)
elif __doc__ is not None:
helpString += __doc__ + os.linesep
else:
helpString = 'file://' + htmlfile
return helpString | def function[getHelpAsString, parameter[docstring, show_ver]]:
constant[
Return useful help from a file in the script directory called
``__taskname__.help``
]
variable[install_dir] assign[=] call[name[os].path.dirname, parameter[name[__file__]]]
variable[taskname] assign[=] call[name[util].base_taskname, parameter[name[__taskname__], name[__package__]]]
variable[htmlfile] assign[=] call[name[os].path.join, parameter[name[install_dir], constant[htmlhelp], binary_operation[name[taskname] + constant[.html]]]]
variable[helpfile] assign[=] call[name[os].path.join, parameter[name[install_dir], binary_operation[name[taskname] + constant[.help]]]]
if <ast.BoolOp object at 0x7da1b1bb09d0> begin[:]
if name[show_ver] begin[:]
variable[helpString] assign[=] call[constant[
{:s} Version {:s} updated on {:s}
].format, parameter[name[__taskname__], name[__version__], name[__version_date__]]]
if call[name[os].path.exists, parameter[name[helpfile]]] begin[:]
<ast.AugAssign object at 0x7da1b1bb2e30>
return[name[helpString]] | keyword[def] identifier[getHelpAsString] ( identifier[docstring] = keyword[False] , identifier[show_ver] = keyword[True] ):
literal[string]
identifier[install_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )
identifier[taskname] = identifier[util] . identifier[base_taskname] ( identifier[__taskname__] , identifier[__package__] )
identifier[htmlfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[install_dir] , literal[string] , identifier[taskname] + literal[string] )
identifier[helpfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[install_dir] , identifier[taskname] + literal[string] )
keyword[if] identifier[docstring] keyword[or] ( keyword[not] identifier[docstring] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[htmlfile] )):
keyword[if] identifier[show_ver] :
identifier[helpString] = literal[string] . identifier[format] (
identifier[__taskname__] , identifier[__version__] , identifier[__version_date__]
)
keyword[else] :
identifier[helpString] = literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[helpfile] ):
identifier[helpString] += identifier[teal] . identifier[getHelpFileAsString] ( identifier[taskname] , identifier[__file__] )
keyword[elif] identifier[__doc__] keyword[is] keyword[not] keyword[None] :
identifier[helpString] += identifier[__doc__] + identifier[os] . identifier[linesep]
keyword[else] :
identifier[helpString] = literal[string] + identifier[htmlfile]
keyword[return] identifier[helpString] | def getHelpAsString(docstring=False, show_ver=True):
"""
Return useful help from a file in the script directory called
``__taskname__.help``
"""
install_dir = os.path.dirname(__file__)
taskname = util.base_taskname(__taskname__, __package__)
htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')
helpfile = os.path.join(install_dir, taskname + '.help')
if docstring or (not docstring and (not os.path.exists(htmlfile))):
if show_ver:
helpString = '\n{:s} Version {:s} updated on {:s}\n\n'.format(__taskname__, __version__, __version_date__) # depends on [control=['if'], data=[]]
else:
helpString = ''
if os.path.exists(helpfile):
helpString += teal.getHelpFileAsString(taskname, __file__) # depends on [control=['if'], data=[]]
elif __doc__ is not None:
helpString += __doc__ + os.linesep # depends on [control=['if'], data=['__doc__']] # depends on [control=['if'], data=[]]
else:
helpString = 'file://' + htmlfile
return helpString |
def add_facts_stream(self, assessment, data_type, options, data):
"""
To add a facts data stream to an Assessment
:param assessment: string
:param data_type: string
:param options: dict
:param data: Stream
"""
url = self.get_fact_url(assessment,options)
form_data = {
'files': {
'data': (
Utils.random_string(10)+('.json' if data_type == 'json' else '.csv'),
data,
'text/plain;charset=UTF-8',
{'Expires': '0'}
)
}
}
response = self.http.upstream(url,form_data)
return response | def function[add_facts_stream, parameter[self, assessment, data_type, options, data]]:
constant[
To add a facts data stream to an Assessment
:param assessment: string
:param data_type: string
:param options: dict
:param data: Stream
]
variable[url] assign[=] call[name[self].get_fact_url, parameter[name[assessment], name[options]]]
variable[form_data] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc9e10>], [<ast.Dict object at 0x7da18bcc8d00>]]
variable[response] assign[=] call[name[self].http.upstream, parameter[name[url], name[form_data]]]
return[name[response]] | keyword[def] identifier[add_facts_stream] ( identifier[self] , identifier[assessment] , identifier[data_type] , identifier[options] , identifier[data] ):
literal[string]
identifier[url] = identifier[self] . identifier[get_fact_url] ( identifier[assessment] , identifier[options] )
identifier[form_data] ={
literal[string] :{
literal[string] :(
identifier[Utils] . identifier[random_string] ( literal[int] )+( literal[string] keyword[if] identifier[data_type] == literal[string] keyword[else] literal[string] ),
identifier[data] ,
literal[string] ,
{ literal[string] : literal[string] }
)
}
}
identifier[response] = identifier[self] . identifier[http] . identifier[upstream] ( identifier[url] , identifier[form_data] )
keyword[return] identifier[response] | def add_facts_stream(self, assessment, data_type, options, data):
"""
To add a facts data stream to an Assessment
:param assessment: string
:param data_type: string
:param options: dict
:param data: Stream
"""
url = self.get_fact_url(assessment, options)
form_data = {'files': {'data': (Utils.random_string(10) + ('.json' if data_type == 'json' else '.csv'), data, 'text/plain;charset=UTF-8', {'Expires': '0'})}}
response = self.http.upstream(url, form_data)
return response |
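# Hedged usage sketch for add_facts_stream; `client` is an assumed instance of
# the surrounding class, and the assessment id and options are placeholders.
def upload_facts(client):
    with open("facts.csv", "rb") as stream:
        return client.add_facts_stream(
            "assessment-id-123",       # placeholder assessment id
            "csv",                     # picks the generated file suffix
            {},                        # options forwarded to the fact URL
            stream)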
def authenticate(self, username, password, priv_lvl=TAC_PLUS_PRIV_LVL_MIN,
authen_type=TAC_PLUS_AUTHEN_TYPE_ASCII,
chap_ppp_id=None, chap_challenge=None,
rem_addr=TAC_PLUS_VIRTUAL_REM_ADDR, port=TAC_PLUS_VIRTUAL_PORT):
"""
Authenticate to a TACACS+ server with a username and password.
:param username:
:param password:
:param priv_lvl:
:param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII,
TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP
:param chap_ppp_id: PPP ID when authen_type == 'chap'
:param chap_challenge: challenge value when authen_type == 'chap'
:param rem_addr: AAA request source, defaults to TAC_PLUS_VIRTUAL_REM_ADDR
:param port: AAA port, defaults to TAC_PLUS_VIRTUAL_PORT
:return: TACACSAuthenticationReply
:raises: socket.timeout, socket.error
"""
start_data = six.b('')
if authen_type in (TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP):
self.version_min = TAC_PLUS_MINOR_VER_ONE
if authen_type == TAC_PLUS_AUTHEN_TYPE_PAP:
start_data = six.b(password)
if authen_type == TAC_PLUS_AUTHEN_TYPE_CHAP:
if not isinstance(chap_ppp_id, six.string_types):
raise ValueError('chap_ppp_id must be a string')
if len(chap_ppp_id) != 1:
raise ValueError('chap_ppp_id must be a 1-byte string')
if not isinstance(chap_challenge, six.string_types):
raise ValueError('chap_challenge must be a string')
if len(chap_challenge) > 255:
raise ValueError('chap_challenge may not be more than 255 bytes')
start_data = (
six.b(chap_ppp_id) +
six.b(chap_challenge) +
md5(six.b(
chap_ppp_id + password + chap_challenge
)).digest()
)
with self.closing():
packet = self.send(
TACACSAuthenticationStart(username, authen_type, priv_lvl,
start_data, rem_addr=rem_addr, port=port),
TAC_PLUS_AUTHEN
)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([
reply.__class__.__name__,
'recv header <%s>' % packet.header,
'recv body <%s>' % reply
]))
if authen_type == TAC_PLUS_AUTHEN_TYPE_ASCII and reply.getpass:
packet = self.send(TACACSAuthenticationContinue(password),
TAC_PLUS_AUTHEN,
packet.seq_no + 1)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([
reply.__class__.__name__,
'recv header <%s>' % packet.header,
'recv body <%s>' % reply
]))
if reply.flags == TAC_PLUS_CONTINUE_FLAG_ABORT:
reply.status = TAC_PLUS_AUTHEN_STATUS_FAIL
return reply | def function[authenticate, parameter[self, username, password, priv_lvl, authen_type, chap_ppp_id, chap_challenge, rem_addr, port]]:
constant[
Authenticate to a TACACS+ server with a username and password.
:param username:
:param password:
:param priv_lvl:
:param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII,
TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP
:param chap_ppp_id: PPP ID when authen_type == 'chap'
:param chap_challenge: challenge value when authen_type == 'chap'
:param rem_addr: AAA request source, defaults to TAC_PLUS_VIRTUAL_REM_ADDR
:param port: AAA port, defaults to TAC_PLUS_VIRTUAL_PORT
:return: TACACSAuthenticationReply
:raises: socket.timeout, socket.error
]
variable[start_data] assign[=] call[name[six].b, parameter[constant[]]]
if compare[name[authen_type] in tuple[[<ast.Name object at 0x7da18c4cf070>, <ast.Name object at 0x7da18c4ce320>]]] begin[:]
name[self].version_min assign[=] name[TAC_PLUS_MINOR_VER_ONE]
if compare[name[authen_type] equal[==] name[TAC_PLUS_AUTHEN_TYPE_PAP]] begin[:]
variable[start_data] assign[=] call[name[six].b, parameter[name[password]]]
if compare[name[authen_type] equal[==] name[TAC_PLUS_AUTHEN_TYPE_CHAP]] begin[:]
if <ast.UnaryOp object at 0x7da20c76f1f0> begin[:]
<ast.Raise object at 0x7da20c76ed40>
if compare[call[name[len], parameter[name[chap_ppp_id]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da20c76e080>
if <ast.UnaryOp object at 0x7da20c76eaa0> begin[:]
<ast.Raise object at 0x7da20c76dea0>
if compare[call[name[len], parameter[name[chap_challenge]]] greater[>] constant[255]] begin[:]
<ast.Raise object at 0x7da20c76d4e0>
variable[start_data] assign[=] binary_operation[binary_operation[call[name[six].b, parameter[name[chap_ppp_id]]] + call[name[six].b, parameter[name[chap_challenge]]]] + call[call[name[md5], parameter[call[name[six].b, parameter[binary_operation[binary_operation[name[chap_ppp_id] + name[password]] + name[chap_challenge]]]]]].digest, parameter[]]]
with call[name[self].closing, parameter[]] begin[:]
variable[packet] assign[=] call[name[self].send, parameter[call[name[TACACSAuthenticationStart], parameter[name[username], name[authen_type], name[priv_lvl], name[start_data]]], name[TAC_PLUS_AUTHEN]]]
variable[reply] assign[=] call[name[TACACSAuthenticationReply].unpacked, parameter[name[packet].body]]
call[name[logger].debug, parameter[call[constant[
].join, parameter[list[[<ast.Attribute object at 0x7da18ede4670>, <ast.BinOp object at 0x7da18ede4550>, <ast.BinOp object at 0x7da18ede4640>]]]]]]
if <ast.BoolOp object at 0x7da18ede6aa0> begin[:]
variable[packet] assign[=] call[name[self].send, parameter[call[name[TACACSAuthenticationContinue], parameter[name[password]]], name[TAC_PLUS_AUTHEN], binary_operation[name[packet].seq_no + constant[1]]]]
variable[reply] assign[=] call[name[TACACSAuthenticationReply].unpacked, parameter[name[packet].body]]
call[name[logger].debug, parameter[call[constant[
].join, parameter[list[[<ast.Attribute object at 0x7da18f810910>, <ast.BinOp object at 0x7da18f812080>, <ast.BinOp object at 0x7da18f8130a0>]]]]]]
if compare[name[reply].flags equal[==] name[TAC_PLUS_CONTINUE_FLAG_ABORT]] begin[:]
name[reply].status assign[=] name[TAC_PLUS_AUTHEN_STATUS_FAIL]
return[name[reply]] | keyword[def] identifier[authenticate] ( identifier[self] , identifier[username] , identifier[password] , identifier[priv_lvl] = identifier[TAC_PLUS_PRIV_LVL_MIN] ,
identifier[authen_type] = identifier[TAC_PLUS_AUTHEN_TYPE_ASCII] ,
identifier[chap_ppp_id] = keyword[None] , identifier[chap_challenge] = keyword[None] ,
identifier[rem_addr] = identifier[TAC_PLUS_VIRTUAL_REM_ADDR] , identifier[port] = identifier[TAC_PLUS_VIRTUAL_PORT] ):
literal[string]
identifier[start_data] = identifier[six] . identifier[b] ( literal[string] )
keyword[if] identifier[authen_type] keyword[in] ( identifier[TAC_PLUS_AUTHEN_TYPE_PAP] ,
identifier[TAC_PLUS_AUTHEN_TYPE_CHAP] ):
identifier[self] . identifier[version_min] = identifier[TAC_PLUS_MINOR_VER_ONE]
keyword[if] identifier[authen_type] == identifier[TAC_PLUS_AUTHEN_TYPE_PAP] :
identifier[start_data] = identifier[six] . identifier[b] ( identifier[password] )
keyword[if] identifier[authen_type] == identifier[TAC_PLUS_AUTHEN_TYPE_CHAP] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[chap_ppp_id] , identifier[six] . identifier[string_types] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[chap_ppp_id] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[chap_challenge] , identifier[six] . identifier[string_types] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[chap_challenge] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[start_data] =(
identifier[six] . identifier[b] ( identifier[chap_ppp_id] )+
identifier[six] . identifier[b] ( identifier[chap_challenge] )+
identifier[md5] ( identifier[six] . identifier[b] (
identifier[chap_ppp_id] + identifier[password] + identifier[chap_challenge]
)). identifier[digest] ()
)
keyword[with] identifier[self] . identifier[closing] ():
identifier[packet] = identifier[self] . identifier[send] (
identifier[TACACSAuthenticationStart] ( identifier[username] , identifier[authen_type] , identifier[priv_lvl] ,
identifier[start_data] , identifier[rem_addr] = identifier[rem_addr] , identifier[port] = identifier[port] ),
identifier[TAC_PLUS_AUTHEN]
)
identifier[reply] = identifier[TACACSAuthenticationReply] . identifier[unpacked] ( identifier[packet] . identifier[body] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[join] ([
identifier[reply] . identifier[__class__] . identifier[__name__] ,
literal[string] % identifier[packet] . identifier[header] ,
literal[string] % identifier[reply]
]))
keyword[if] identifier[authen_type] == identifier[TAC_PLUS_AUTHEN_TYPE_ASCII] keyword[and] identifier[reply] . identifier[getpass] :
identifier[packet] = identifier[self] . identifier[send] ( identifier[TACACSAuthenticationContinue] ( identifier[password] ),
identifier[TAC_PLUS_AUTHEN] ,
identifier[packet] . identifier[seq_no] + literal[int] )
identifier[reply] = identifier[TACACSAuthenticationReply] . identifier[unpacked] ( identifier[packet] . identifier[body] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[join] ([
identifier[reply] . identifier[__class__] . identifier[__name__] ,
literal[string] % identifier[packet] . identifier[header] ,
literal[string] % identifier[reply]
]))
keyword[if] identifier[reply] . identifier[flags] == identifier[TAC_PLUS_CONTINUE_FLAG_ABORT] :
identifier[reply] . identifier[status] = identifier[TAC_PLUS_AUTHEN_STATUS_FAIL]
keyword[return] identifier[reply] | def authenticate(self, username, password, priv_lvl=TAC_PLUS_PRIV_LVL_MIN, authen_type=TAC_PLUS_AUTHEN_TYPE_ASCII, chap_ppp_id=None, chap_challenge=None, rem_addr=TAC_PLUS_VIRTUAL_REM_ADDR, port=TAC_PLUS_VIRTUAL_PORT):
"""
Authenticate to a TACACS+ server with a username and password.
:param username:
:param password:
:param priv_lvl:
:param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII,
TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP
:param chap_ppp_id: PPP ID when authen_type == 'chap'
:param chap_challenge: challenge value when authen_type == 'chap'
:param rem_addr: AAA request source, defaults to TAC_PLUS_VIRTUAL_REM_ADDR
:param port: AAA port, defaults to TAC_PLUS_VIRTUAL_PORT
:return: TACACSAuthenticationReply
:raises: socket.timeout, socket.error
"""
start_data = six.b('')
if authen_type in (TAC_PLUS_AUTHEN_TYPE_PAP, TAC_PLUS_AUTHEN_TYPE_CHAP):
self.version_min = TAC_PLUS_MINOR_VER_ONE
if authen_type == TAC_PLUS_AUTHEN_TYPE_PAP:
start_data = six.b(password) # depends on [control=['if'], data=[]]
if authen_type == TAC_PLUS_AUTHEN_TYPE_CHAP:
if not isinstance(chap_ppp_id, six.string_types):
raise ValueError('chap_ppp_id must be a string') # depends on [control=['if'], data=[]]
if len(chap_ppp_id) != 1:
raise ValueError('chap_ppp_id must be a 1-byte string') # depends on [control=['if'], data=[]]
if not isinstance(chap_challenge, six.string_types):
raise ValueError('chap_challenge must be a string') # depends on [control=['if'], data=[]]
if len(chap_challenge) > 255:
raise ValueError('chap_challenge may not be more than 255 bytes') # depends on [control=['if'], data=[]]
start_data = six.b(chap_ppp_id) + six.b(chap_challenge) + md5(six.b(chap_ppp_id + password + chap_challenge)).digest() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['authen_type']]
with self.closing():
packet = self.send(TACACSAuthenticationStart(username, authen_type, priv_lvl, start_data, rem_addr=rem_addr, port=port), TAC_PLUS_AUTHEN)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([reply.__class__.__name__, 'recv header <%s>' % packet.header, 'recv body <%s>' % reply]))
if authen_type == TAC_PLUS_AUTHEN_TYPE_ASCII and reply.getpass:
packet = self.send(TACACSAuthenticationContinue(password), TAC_PLUS_AUTHEN, packet.seq_no + 1)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([reply.__class__.__name__, 'recv header <%s>' % packet.header, 'recv body <%s>' % reply]))
if reply.flags == TAC_PLUS_CONTINUE_FLAG_ABORT:
reply.status = TAC_PLUS_AUTHEN_STATUS_FAIL # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
return reply |
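# Hedged usage sketch for authenticate(); TACACSClient and its constructor
# arguments are assumptions about the surrounding tacacs_plus-style library.
# client = TACACSClient("192.0.2.10", 49, secret="s3cr3t", timeout=10)
# reply = client.authenticate("alice", "password",
#                             authen_type=TAC_PLUS_AUTHEN_TYPE_PAP)
# if reply.valid:
#     print("authentication accepted")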
def populateFromFile(self, dataUrl):
"""
Populates the instance variables of this ReferenceSet from the
data URL.
"""
self._dataUrl = dataUrl
fastaFile = self.getFastaFile()
for referenceName in fastaFile.references:
reference = HtslibReference(self, referenceName)
# TODO break this up into chunks and calculate the MD5
# in bits (say, 64K chunks?)
bases = fastaFile.fetch(referenceName)
md5checksum = hashlib.md5(bases).hexdigest()
reference.setMd5checksum(md5checksum)
reference.setLength(len(bases))
self.addReference(reference) | def function[populateFromFile, parameter[self, dataUrl]]:
constant[
Populates the instance variables of this ReferenceSet from the
data URL.
]
name[self]._dataUrl assign[=] name[dataUrl]
variable[fastaFile] assign[=] call[name[self].getFastaFile, parameter[]]
for taget[name[referenceName]] in starred[name[fastaFile].references] begin[:]
variable[reference] assign[=] call[name[HtslibReference], parameter[name[self], name[referenceName]]]
variable[bases] assign[=] call[name[fastaFile].fetch, parameter[name[referenceName]]]
variable[md5checksum] assign[=] call[call[name[hashlib].md5, parameter[name[bases]]].hexdigest, parameter[]]
call[name[reference].setMd5checksum, parameter[name[md5checksum]]]
call[name[reference].setLength, parameter[call[name[len], parameter[name[bases]]]]]
call[name[self].addReference, parameter[name[reference]]] | keyword[def] identifier[populateFromFile] ( identifier[self] , identifier[dataUrl] ):
literal[string]
identifier[self] . identifier[_dataUrl] = identifier[dataUrl]
identifier[fastaFile] = identifier[self] . identifier[getFastaFile] ()
keyword[for] identifier[referenceName] keyword[in] identifier[fastaFile] . identifier[references] :
identifier[reference] = identifier[HtslibReference] ( identifier[self] , identifier[referenceName] )
identifier[bases] = identifier[fastaFile] . identifier[fetch] ( identifier[referenceName] )
identifier[md5checksum] = identifier[hashlib] . identifier[md5] ( identifier[bases] ). identifier[hexdigest] ()
identifier[reference] . identifier[setMd5checksum] ( identifier[md5checksum] )
identifier[reference] . identifier[setLength] ( identifier[len] ( identifier[bases] ))
identifier[self] . identifier[addReference] ( identifier[reference] ) | def populateFromFile(self, dataUrl):
"""
Populates the instance variables of this ReferencSet from the
data URL.
"""
self._dataUrl = dataUrl
fastaFile = self.getFastaFile()
for referenceName in fastaFile.references:
reference = HtslibReference(self, referenceName)
# TODO break this up into chunks and calculate the MD5
# in bits (say, 64K chunks?)
bases = fastaFile.fetch(referenceName)
md5checksum = hashlib.md5(bases).hexdigest()
reference.setMd5checksum(md5checksum)
reference.setLength(len(bases))
self.addReference(reference) # depends on [control=['for'], data=['referenceName']] |
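A short, hedged usage sketch for populateFromFile; HtslibReferenceSet and getReferences are assumed names for the owning reference-set class and its accessor, neither of which this row shows.

referenceSet = HtslibReferenceSet()               # assumed owning class
referenceSet.populateFromFile('/data/GRCh38.fa')  # illustrative FASTA path
for reference in referenceSet.getReferences():    # assumed accessor
    print(reference)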
def exec(self, command_str, **command_env):
""" Execute the given command (command will be split into tokens, every space that is a part of a token
must be quoted)
:param command_str: command to execute
:param command_env: command environment
:return: WCommandResultProto
"""
env = self.__vars.copy()
env.update(command_env)
command_tokens = WCommandProto.split_command(command_str)
command_obj = self.commands().select(*command_tokens, **env)
if command_obj is None:
raise WCommandSet.NoCommandFound('No suitable command found: "%s"' % command_str)
result = command_obj.exec(*command_tokens, **env)
self.__track_vars(result)
return result | def function[exec, parameter[self, command_str]]:
constant[ Execute the given command (command will be split into tokens, every space that is a part of a token
must be quoted)
:param command_str: command to execute
:param command_env: command environment
:return: WCommandResultProto
]
variable[env] assign[=] call[name[self].__vars.copy, parameter[]]
call[name[env].update, parameter[name[command_env]]]
variable[command_tokens] assign[=] call[name[WCommandProto].split_command, parameter[name[command_str]]]
variable[command_obj] assign[=] call[call[name[self].commands, parameter[]].select, parameter[<ast.Starred object at 0x7da20c6e5420>]]
if compare[name[command_obj] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6e6e00>
variable[result] assign[=] call[name[command_obj].exec, parameter[<ast.Starred object at 0x7da20c6e58a0>]]
call[name[self].__track_vars, parameter[name[result]]]
return[name[result]] | keyword[def] identifier[exec] ( identifier[self] , identifier[command_str] ,** identifier[command_env] ):
literal[string]
identifier[env] = identifier[self] . identifier[__vars] . identifier[copy] ()
identifier[env] . identifier[update] ( identifier[command_env] )
identifier[command_tokens] = identifier[WCommandProto] . identifier[split_command] ( identifier[command_str] )
identifier[command_obj] = identifier[self] . identifier[commands] (). identifier[select] (* identifier[command_tokens] ,** identifier[env] )
keyword[if] identifier[command_obj] keyword[is] keyword[None] :
keyword[raise] identifier[WCommandSet] . identifier[NoCommandFound] ( literal[string] % identifier[command_str] )
identifier[result] = identifier[command_obj] . identifier[exec] (* identifier[command_tokens] ,** identifier[env] )
identifier[self] . identifier[__track_vars] ( identifier[result] )
keyword[return] identifier[result] | def exec(self, command_str, **command_env):
""" Execute the given command (command will be split into tokens, every space that is a part of a token
must be quoted)
:param command_str: command to execute
:param command_env: command environment
:return: WCommandResultProto
"""
env = self.__vars.copy()
env.update(command_env)
command_tokens = WCommandProto.split_command(command_str)
command_obj = self.commands().select(*command_tokens, **env)
if command_obj is None:
raise WCommandSet.NoCommandFound('No suitable command found: "%s"' % command_str) # depends on [control=['if'], data=[]]
result = command_obj.exec(*command_tokens, **env)
self.__track_vars(result)
return result |
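A hedged usage sketch for exec; how commands are registered on a WCommandSet is not shown here, so the construction below is an assumption. Note the quoting rule from the docstring: a space inside a single token must be quoted.

command_set = WCommandSet()  # assumed construction; the command registration API is not shown
result = command_set.exec('user show "John Doe"', verbosity='high')  # "John Doe" is one token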
def remove_file(filepath, dry_run=False):
"""Remove the file at filepath
Catches exception if the file does not exist.
If dry_run is True, print name of file to be removed, but do not remove it.
"""
if dry_run:
sys.stdout.write("rm %s\n" % filepath)
else:
try:
os.remove(filepath)
except OSError:
pass | def function[remove_file, parameter[filepath, dry_run]]:
constant[Remove the file at filepath
Catches exception if the file does not exist.
If dry_run is True, print name of file to be removed, but do not remove it.
]
if name[dry_run] begin[:]
call[name[sys].stdout.write, parameter[binary_operation[constant[rm %s
] <ast.Mod object at 0x7da2590d6920> name[filepath]]]] | keyword[def] identifier[remove_file] ( identifier[filepath] , identifier[dry_run] = keyword[False] ):
literal[string]
keyword[if] identifier[dry_run] :
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] % identifier[filepath] )
keyword[else] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[filepath] )
keyword[except] identifier[OSError] :
keyword[pass] | def remove_file(filepath, dry_run=False):
"""Remove the file at filepath
Catches exception if the file does not exist.
If dry_run is True, print name of file to be removed, but do not remove it.
"""
if dry_run:
sys.stdout.write('rm %s\n' % filepath) # depends on [control=['if'], data=[]]
else:
try:
os.remove(filepath) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] |
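Two illustrative calls matching the documented behaviour:

remove_file('/tmp/build/artifact.o', dry_run=True)  # only prints: rm /tmp/build/artifact.o
remove_file('/tmp/build/artifact.o')                # removes the file; a missing file is ignored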
def median(values, simple=True, mean_weight=0.0):
"""
RETURN MEDIAN VALUE
IF simple=False THEN IN THE EVENT OF MULTIPLE INSTANCES OF THE
MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION
IN THE MEDIAN RANGE
mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS
CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
"""
if OR(v == None for v in values):
Log.error("median is not ready to handle None")
try:
if not values:
return Null
l = len(values)
_sorted = sorted(values)
middle = int(l / 2)
_median = float(_sorted[middle])
if len(_sorted) == 1:
return _median
if simple:
if l % 2 == 0:
return (_sorted[middle - 1] + _median) / 2
return _median
# FIND RANGE OF THE median
start_index = middle - 1
while start_index > 0 and _sorted[start_index] == _median:
start_index -= 1
start_index += 1
stop_index = middle + 1
while stop_index < l and _sorted[stop_index] == _median:
stop_index += 1
num_middle = stop_index - start_index
if l % 2 == 0:
if num_middle == 1:
return (_sorted[middle - 1] + _median) / 2
else:
return (_median - 0.5) + (middle - start_index) / num_middle
else:
if num_middle == 1:
return (1 - mean_weight) * _median + mean_weight * (_sorted[middle - 1] + _sorted[middle + 1]) / 2
else:
return (_median - 0.5) + (middle + 0.5 - start_index) / num_middle
except Exception as e:
Log.error("problem with median of {{values}}", values= values, cause=e) | def function[median, parameter[values, simple, mean_weight]]:
constant[
RETURN MEDIAN VALUE
IF simple=False THEN IN THE EVENT OF MULTIPLE INSTANCES OF THE
MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION
IN THE MEDIAN RANGE
mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS
CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
]
if call[name[OR], parameter[<ast.GeneratorExp object at 0x7da1b0baad40>]] begin[:]
call[name[Log].error, parameter[constant[median is not ready to handle None]]]
<ast.Try object at 0x7da1b0bab760> | keyword[def] identifier[median] ( identifier[values] , identifier[simple] = keyword[True] , identifier[mean_weight] = literal[int] ):
literal[string]
keyword[if] identifier[OR] ( identifier[v] == keyword[None] keyword[for] identifier[v] keyword[in] identifier[values] ):
identifier[Log] . identifier[error] ( literal[string] )
keyword[try] :
keyword[if] keyword[not] identifier[values] :
keyword[return] identifier[Null]
identifier[l] = identifier[len] ( identifier[values] )
identifier[_sorted] = identifier[sorted] ( identifier[values] )
identifier[middle] = identifier[int] ( identifier[l] / literal[int] )
identifier[_median] = identifier[float] ( identifier[_sorted] [ identifier[middle] ])
keyword[if] identifier[len] ( identifier[_sorted] )== literal[int] :
keyword[return] identifier[_median]
keyword[if] identifier[simple] :
keyword[if] identifier[l] % literal[int] == literal[int] :
keyword[return] ( identifier[_sorted] [ identifier[middle] - literal[int] ]+ identifier[_median] )/ literal[int]
keyword[return] identifier[_median]
identifier[start_index] = identifier[middle] - literal[int]
keyword[while] identifier[start_index] > literal[int] keyword[and] identifier[_sorted] [ identifier[start_index] ]== identifier[_median] :
identifier[start_index] -= literal[int]
identifier[start_index] += literal[int]
identifier[stop_index] = identifier[middle] + literal[int]
keyword[while] identifier[stop_index] < identifier[l] keyword[and] identifier[_sorted] [ identifier[stop_index] ]== identifier[_median] :
identifier[stop_index] += literal[int]
identifier[num_middle] = identifier[stop_index] - identifier[start_index]
keyword[if] identifier[l] % literal[int] == literal[int] :
keyword[if] identifier[num_middle] == literal[int] :
keyword[return] ( identifier[_sorted] [ identifier[middle] - literal[int] ]+ identifier[_median] )/ literal[int]
keyword[else] :
keyword[return] ( identifier[_median] - literal[int] )+( identifier[middle] - identifier[start_index] )/ identifier[num_middle]
keyword[else] :
keyword[if] identifier[num_middle] == literal[int] :
keyword[return] ( literal[int] - identifier[mean_weight] )* identifier[_median] + identifier[mean_weight] *( identifier[_sorted] [ identifier[middle] - literal[int] ]+ identifier[_sorted] [ identifier[middle] + literal[int] ])/ literal[int]
keyword[else] :
keyword[return] ( identifier[_median] - literal[int] )+( identifier[middle] + literal[int] - identifier[start_index] )/ identifier[num_middle]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[Log] . identifier[error] ( literal[string] , identifier[values] = identifier[values] , identifier[cause] = identifier[e] ) | def median(values, simple=True, mean_weight=0.0):
"""
RETURN MEDIAN VALUE
IF simple=False THEN IN THE EVENT OF MULTIPLE INSTANCES OF THE
MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION
IN THE MEDIAN RANGE
mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS
CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
"""
if OR((v == None for v in values)):
Log.error('median is not ready to handle None') # depends on [control=['if'], data=[]]
try:
if not values:
return Null # depends on [control=['if'], data=[]]
l = len(values)
_sorted = sorted(values)
middle = int(l / 2)
_median = float(_sorted[middle])
if len(_sorted) == 1:
return _median # depends on [control=['if'], data=[]]
if simple:
if l % 2 == 0:
return (_sorted[middle - 1] + _median) / 2 # depends on [control=['if'], data=[]]
return _median # depends on [control=['if'], data=[]]
# FIND RANGE OF THE median
start_index = middle - 1
while start_index > 0 and _sorted[start_index] == _median:
start_index -= 1 # depends on [control=['while'], data=[]]
start_index += 1
stop_index = middle + 1
while stop_index < l and _sorted[stop_index] == _median:
stop_index += 1 # depends on [control=['while'], data=[]]
num_middle = stop_index - start_index
if l % 2 == 0:
if num_middle == 1:
return (_sorted[middle - 1] + _median) / 2 # depends on [control=['if'], data=[]]
else:
return _median - 0.5 + (middle - start_index) / num_middle # depends on [control=['if'], data=[]]
elif num_middle == 1:
return (1 - mean_weight) * _median + mean_weight * (_sorted[middle - 1] + _sorted[middle + 1]) / 2 # depends on [control=['if'], data=[]]
else:
return _median - 0.5 + (middle + 0.5 - start_index) / num_middle # depends on [control=['try'], data=[]]
except Exception as e:
Log.error('problem with median of {{values}}', values=values, cause=e) # depends on [control=['except'], data=['e']] |
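Worked calls for the two modes, assuming the OR/Log/Null helpers the function relies on are importable (they follow the mo-logs style):

median([1, 2, 3, 4])                   # even length, simple=True -> (2 + 3) / 2 = 2.5
median([1, 2, 2, 2, 5], simple=False)  # interpolates inside the run of equal middle values -> 2.0
median([9])                            # single element -> 9.0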
def Loc(kind, loc=None):
"""A rule that accepts a token of kind ``kind`` and returns its location, or returns None."""
@llrule(loc, lambda parser: [kind])
def rule(parser):
result = parser._accept(kind)
if result is unmatched:
return result
return result.loc
return rule | def function[Loc, parameter[kind, loc]]:
constant[A rule that accepts a token of kind ``kind`` and returns its location, or returns None.]
def function[rule, parameter[parser]]:
variable[result] assign[=] call[name[parser]._accept, parameter[name[kind]]]
if compare[name[result] is name[unmatched]] begin[:]
return[name[result]]
return[name[result].loc]
return[name[rule]] | keyword[def] identifier[Loc] ( identifier[kind] , identifier[loc] = keyword[None] ):
literal[string]
@ identifier[llrule] ( identifier[loc] , keyword[lambda] identifier[parser] :[ identifier[kind] ])
keyword[def] identifier[rule] ( identifier[parser] ):
identifier[result] = identifier[parser] . identifier[_accept] ( identifier[kind] )
keyword[if] identifier[result] keyword[is] identifier[unmatched] :
keyword[return] identifier[result]
keyword[return] identifier[result] . identifier[loc]
keyword[return] identifier[rule] | def Loc(kind, loc=None):
"""A rule that accepts a token of kind ``kind`` and returns its location, or returns None."""
@llrule(loc, lambda parser: [kind])
def rule(parser):
result = parser._accept(kind)
if result is unmatched:
return result # depends on [control=['if'], data=['result']]
return result.loc
return rule |
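A small sketch of how such a rule combinator is used; the token kind and the parser object are assumptions for illustration.

lparen_rule = Loc('(')     # build a rule that matches a "(" token
loc = lparen_rule(parser)  # the token's location on success, the unmatched sentinel otherwise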
def _delete(self, route, headers=None, failure_message=None):
"""
Execute a delete request and return the result
:param headers:
:return:
"""
headers = self._get_headers(headers)
response_lambda = (lambda: requests.delete(
self._get_qualified_route(route), headers=headers, verify=False, proxies=self.proxies)
)
response = check_for_rate_limiting(response_lambda(), response_lambda)
return self._handle_response(response, failure_message) | def function[_delete, parameter[self, route, headers, failure_message]]:
constant[
Execute a delete request and return the result
:param headers:
:return:
]
variable[headers] assign[=] call[name[self]._get_headers, parameter[name[headers]]]
variable[response_lambda] assign[=] <ast.Lambda object at 0x7da20e9b0160>
variable[response] assign[=] call[name[check_for_rate_limiting], parameter[call[name[response_lambda], parameter[]], name[response_lambda]]]
return[call[name[self]._handle_response, parameter[name[response], name[failure_message]]]] | keyword[def] identifier[_delete] ( identifier[self] , identifier[route] , identifier[headers] = keyword[None] , identifier[failure_message] = keyword[None] ):
literal[string]
identifier[headers] = identifier[self] . identifier[_get_headers] ( identifier[headers] )
identifier[response_lambda] =( keyword[lambda] : identifier[requests] . identifier[delete] (
identifier[self] . identifier[_get_qualified_route] ( identifier[route] ), identifier[headers] = identifier[headers] , identifier[verify] = keyword[False] , identifier[proxies] = identifier[self] . identifier[proxies] )
)
identifier[response] = identifier[check_for_rate_limiting] ( identifier[response_lambda] (), identifier[response_lambda] )
keyword[return] identifier[self] . identifier[_handle_response] ( identifier[response] , identifier[failure_message] ) | def _delete(self, route, headers=None, failure_message=None):
"""
Execute a delete request and return the result
:param headers:
:return:
"""
headers = self._get_headers(headers)
response_lambda = lambda : requests.delete(self._get_qualified_route(route), headers=headers, verify=False, proxies=self.proxies)
response = check_for_rate_limiting(response_lambda(), response_lambda)
return self._handle_response(response, failure_message) |
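A hedged example of a concrete endpoint built on this helper; the class, base class and route below are invented for illustration.

class WidgetClient(BaseClient):  # assumed base class providing _delete and its helpers
    def delete_widget(self, widget_id):
        return self._delete('widgets/%s' % widget_id,
                            failure_message='Unable to delete widget %s' % widget_id)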
def find(self, resource_id, query=None):
"""
Finds a single resource by ID related to the current space.
"""
return self.proxy.find(resource_id, query=query) | def function[find, parameter[self, resource_id, query]]:
constant[
Finds a single resource by ID related to the current space.
]
return[call[name[self].proxy.find, parameter[name[resource_id]]]] | keyword[def] identifier[find] ( identifier[self] , identifier[resource_id] , identifier[query] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[proxy] . identifier[find] ( identifier[resource_id] , identifier[query] = identifier[query] ) | def find(self, resource_id, query=None):
"""
Finds a single resource by ID related to the current space.
"""
return self.proxy.find(resource_id, query=query) |
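An illustrative call; the client and proxy wiring around this method are assumptions in the style of the Contentful management SDK.

entry = client.entries('my_space_id').find('my_entry_id', query={'locale': 'en-US'})  # assumed wiring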
def get_client_from_json_dict(client_class, config_dict, **kwargs):
"""Return a SDK client initialized with a JSON auth dict.
The easiest way to obtain this content is to call the following CLI commands:
.. code:: bash
az ad sp create-for-rbac --sdk-auth
This method will fill automatically the following client parameters:
- credentials
- subscription_id
- base_url
- tenant_id
Parameters provided in kwargs will override parameters and be passed directly to the client.
:Example:
.. code:: python
from azure.common.client_factory import get_client_from_json_dict
from azure.mgmt.compute import ComputeManagementClient
config_dict = {
"clientId": "ad735158-65ca-11e7-ba4d-ecb1d756380e",
"clientSecret": "b70bb224-65ca-11e7-810c-ecb1d756380e",
"subscriptionId": "bfc42d3a-65ca-11e7-95cf-ecb1d756380e",
"tenantId": "c81da1d8-65ca-11e7-b1d1-ecb1d756380e",
"activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
"resourceManagerEndpointUrl": "https://management.azure.com/",
"activeDirectoryGraphResourceId": "https://graph.windows.net/",
"sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
"galleryEndpointUrl": "https://gallery.azure.com/",
"managementEndpointUrl": "https://management.core.windows.net/"
}
client = get_client_from_json_dict(ComputeManagementClient, config_dict)
.. versionadded:: 1.1.7
:param client_class: A SDK client class
:param dict config_dict: A config dict.
:return: An instantiated client
"""
is_graphrbac = client_class.__name__ == 'GraphRbacManagementClient'
parameters = {
'subscription_id': config_dict.get('subscriptionId'),
'base_url': config_dict.get('resourceManagerEndpointUrl'),
'tenant_id': config_dict.get('tenantId') # GraphRbac
}
if is_graphrbac:
parameters['base_url'] = config_dict['activeDirectoryGraphResourceId']
if 'credentials' not in kwargs:
# Get the right resource for Credentials
if is_graphrbac:
resource = config_dict['activeDirectoryGraphResourceId']
else:
if "activeDirectoryResourceId" not in config_dict and 'resourceManagerEndpointUrl' not in config_dict:
raise ValueError("Need activeDirectoryResourceId or resourceManagerEndpointUrl key")
resource = config_dict.get('activeDirectoryResourceId', config_dict['resourceManagerEndpointUrl'])
authority_url = config_dict['activeDirectoryEndpointUrl']
is_adfs = bool(re.match('.+(/adfs|/adfs/)$', authority_url, re.I))
if is_adfs:
authority_url = authority_url.rstrip('/') # workaround: ADAL is known to reject auth urls with trailing /
else:
authority_url = authority_url + '/' + config_dict['tenantId']
context = adal.AuthenticationContext(
authority_url,
api_version=None,
validate_authority=not is_adfs
)
parameters['credentials'] = AdalAuthentication(
context.acquire_token_with_client_credentials,
resource,
config_dict['clientId'],
config_dict['clientSecret']
)
parameters.update(kwargs)
return _instantiate_client(client_class, **parameters) | def function[get_client_from_json_dict, parameter[client_class, config_dict]]:
constant[Return an SDK client initialized with a JSON auth dict.
The easiest way to obtain this content is to call the following CLI commands:
.. code:: bash
az ad sp create-for-rbac --sdk-auth
This method will fill automatically the following client parameters:
- credentials
- subscription_id
- base_url
- tenant_id
Parameters provided in kwargs will override parameters and be passed directly to the client.
:Example:
.. code:: python
from azure.common.client_factory import get_client_from_json_dict
from azure.mgmt.compute import ComputeManagementClient
config_dict = {
"clientId": "ad735158-65ca-11e7-ba4d-ecb1d756380e",
"clientSecret": "b70bb224-65ca-11e7-810c-ecb1d756380e",
"subscriptionId": "bfc42d3a-65ca-11e7-95cf-ecb1d756380e",
"tenantId": "c81da1d8-65ca-11e7-b1d1-ecb1d756380e",
"activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
"resourceManagerEndpointUrl": "https://management.azure.com/",
"activeDirectoryGraphResourceId": "https://graph.windows.net/",
"sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
"galleryEndpointUrl": "https://gallery.azure.com/",
"managementEndpointUrl": "https://management.core.windows.net/"
}
client = get_client_from_json_dict(ComputeManagementClient, config_dict)
.. versionadded:: 1.1.7
:param client_class: A SDK client class
:param dict config_dict: A config dict.
:return: An instantiated client
]
variable[is_graphrbac] assign[=] compare[name[client_class].__name__ equal[==] constant[GraphRbacManagementClient]]
variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da1b03567a0>, <ast.Constant object at 0x7da1b03540a0>, <ast.Constant object at 0x7da1b0357e80>], [<ast.Call object at 0x7da1b0357790>, <ast.Call object at 0x7da1b0356e00>, <ast.Call object at 0x7da1b0354eb0>]]
if name[is_graphrbac] begin[:]
call[name[parameters]][constant[base_url]] assign[=] call[name[config_dict]][constant[activeDirectoryGraphResourceId]]
if compare[constant[credentials] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
if name[is_graphrbac] begin[:]
variable[resource] assign[=] call[name[config_dict]][constant[activeDirectoryGraphResourceId]]
variable[authority_url] assign[=] call[name[config_dict]][constant[activeDirectoryEndpointUrl]]
variable[is_adfs] assign[=] call[name[bool], parameter[call[name[re].match, parameter[constant[.+(/adfs|/adfs/)$], name[authority_url], name[re].I]]]]
if name[is_adfs] begin[:]
variable[authority_url] assign[=] call[name[authority_url].rstrip, parameter[constant[/]]]
variable[context] assign[=] call[name[adal].AuthenticationContext, parameter[name[authority_url]]]
call[name[parameters]][constant[credentials]] assign[=] call[name[AdalAuthentication], parameter[name[context].acquire_token_with_client_credentials, name[resource], call[name[config_dict]][constant[clientId]], call[name[config_dict]][constant[clientSecret]]]]
call[name[parameters].update, parameter[name[kwargs]]]
return[call[name[_instantiate_client], parameter[name[client_class]]]] | keyword[def] identifier[get_client_from_json_dict] ( identifier[client_class] , identifier[config_dict] ,** identifier[kwargs] ):
literal[string]
identifier[is_graphrbac] = identifier[client_class] . identifier[__name__] == literal[string]
identifier[parameters] ={
literal[string] : identifier[config_dict] . identifier[get] ( literal[string] ),
literal[string] : identifier[config_dict] . identifier[get] ( literal[string] ),
literal[string] : identifier[config_dict] . identifier[get] ( literal[string] )
}
keyword[if] identifier[is_graphrbac] :
identifier[parameters] [ literal[string] ]= identifier[config_dict] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
keyword[if] identifier[is_graphrbac] :
identifier[resource] = identifier[config_dict] [ literal[string] ]
keyword[else] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[config_dict] keyword[and] literal[string] keyword[not] keyword[in] identifier[config_dict] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[resource] = identifier[config_dict] . identifier[get] ( literal[string] , identifier[config_dict] [ literal[string] ])
identifier[authority_url] = identifier[config_dict] [ literal[string] ]
identifier[is_adfs] = identifier[bool] ( identifier[re] . identifier[match] ( literal[string] , identifier[authority_url] , identifier[re] . identifier[I] ))
keyword[if] identifier[is_adfs] :
identifier[authority_url] = identifier[authority_url] . identifier[rstrip] ( literal[string] )
keyword[else] :
identifier[authority_url] = identifier[authority_url] + literal[string] + identifier[config_dict] [ literal[string] ]
identifier[context] = identifier[adal] . identifier[AuthenticationContext] (
identifier[authority_url] ,
identifier[api_version] = keyword[None] ,
identifier[validate_authority] = keyword[not] identifier[is_adfs]
)
identifier[parameters] [ literal[string] ]= identifier[AdalAuthentication] (
identifier[context] . identifier[acquire_token_with_client_credentials] ,
identifier[resource] ,
identifier[config_dict] [ literal[string] ],
identifier[config_dict] [ literal[string] ]
)
identifier[parameters] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[_instantiate_client] ( identifier[client_class] ,** identifier[parameters] ) | def get_client_from_json_dict(client_class, config_dict, **kwargs):
"""Return a SDK client initialized with a JSON auth dict.
The easiest way to obtain this content is to call the following CLI commands:
.. code:: bash
az ad sp create-for-rbac --sdk-auth
This method will fill automatically the following client parameters:
- credentials
- subscription_id
- base_url
- tenant_id
Parameters provided in kwargs will override parameters and be passed directly to the client.
:Example:
.. code:: python
from azure.common.client_factory import get_client_from_json_dict
from azure.mgmt.compute import ComputeManagementClient
config_dict = {
"clientId": "ad735158-65ca-11e7-ba4d-ecb1d756380e",
"clientSecret": "b70bb224-65ca-11e7-810c-ecb1d756380e",
"subscriptionId": "bfc42d3a-65ca-11e7-95cf-ecb1d756380e",
"tenantId": "c81da1d8-65ca-11e7-b1d1-ecb1d756380e",
"activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
"resourceManagerEndpointUrl": "https://management.azure.com/",
"activeDirectoryGraphResourceId": "https://graph.windows.net/",
"sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
"galleryEndpointUrl": "https://gallery.azure.com/",
"managementEndpointUrl": "https://management.core.windows.net/"
}
client = get_client_from_json_dict(ComputeManagementClient, config_dict)
.. versionadded:: 1.1.7
:param client_class: A SDK client class
:param dict config_dict: A config dict.
:return: An instantiated client
"""
is_graphrbac = client_class.__name__ == 'GraphRbacManagementClient' # GraphRbac
parameters = {'subscription_id': config_dict.get('subscriptionId'), 'base_url': config_dict.get('resourceManagerEndpointUrl'), 'tenant_id': config_dict.get('tenantId')}
if is_graphrbac:
parameters['base_url'] = config_dict['activeDirectoryGraphResourceId'] # depends on [control=['if'], data=[]]
if 'credentials' not in kwargs:
# Get the right resource for Credentials
if is_graphrbac:
resource = config_dict['activeDirectoryGraphResourceId'] # depends on [control=['if'], data=[]]
else:
if 'activeDirectoryResourceId' not in config_dict and 'resourceManagerEndpointUrl' not in config_dict:
raise ValueError('Need activeDirectoryResourceId or resourceManagerEndpointUrl key') # depends on [control=['if'], data=[]]
resource = config_dict.get('activeDirectoryResourceId', config_dict['resourceManagerEndpointUrl'])
authority_url = config_dict['activeDirectoryEndpointUrl']
is_adfs = bool(re.match('.+(/adfs|/adfs/)$', authority_url, re.I))
if is_adfs:
authority_url = authority_url.rstrip('/') # workaround: ADAL is known to reject auth urls with trailing / # depends on [control=['if'], data=[]]
else:
authority_url = authority_url + '/' + config_dict['tenantId']
context = adal.AuthenticationContext(authority_url, api_version=None, validate_authority=not is_adfs)
parameters['credentials'] = AdalAuthentication(context.acquire_token_with_client_credentials, resource, config_dict['clientId'], config_dict['clientSecret']) # depends on [control=['if'], data=[]]
parameters.update(kwargs)
return _instantiate_client(client_class, **parameters) |
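A complementary sketch that loads the same dict from a file produced by `az ad sp create-for-rbac --sdk-auth`; the file path is illustrative.

import json
from azure.mgmt.compute import ComputeManagementClient

with open('azure_auth.json') as fh:  # illustrative path to the --sdk-auth output
    config_dict = json.load(fh)
client = get_client_from_json_dict(ComputeManagementClient, config_dict)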
def load_resource_module(self):
"""Fetch the resource list"""
# Attempt to load the dependencies module
try:
name = '{}.{}'.format(self.name, 'dependencies')
self.dependencies_module = importlib.import_module(name)
except ModuleNotFoundError as err:
raise EffectError(
(
"Effect package '{}' has no 'dependencies' module or the module has errors. "
"Forwarded error from importlib: {}"
).format(self.name, err))
# Fetch the resource descriptions
try:
self.resources = getattr(self.dependencies_module, 'resources')
except AttributeError:
raise EffectError("Effect dependencies module '{}' has no 'resources' attribute".format(name))
if not isinstance(self.resources, list):
raise EffectError(
"Effect dependencies module '{}': 'resources' is of type {} instead of a list".format(
name, type(self.resources)))
# Fetch the effect class list
try:
self.effect_packages = getattr(self.dependencies_module, 'effect_packages')
except AttributeError:
raise EffectError("Effect dependencies module '{}' has 'effect_packages' attribute".format(name))
if not isinstance(self.effect_packages, list):
raise EffectError(
"Effect dependencies module '{}': 'effect_packages' is of type {} instead of a list".format(
name, type(self.effect_packages)))
constant[Fetch the resource list]
<ast.Try object at 0x7da1b23471f0>
<ast.Try object at 0x7da1b23461d0>
if <ast.UnaryOp object at 0x7da1b2347220> begin[:]
<ast.Raise object at 0x7da1b2347820>
<ast.Try object at 0x7da18dc99960>
if <ast.UnaryOp object at 0x7da18dc99060> begin[:]
<ast.Raise object at 0x7da2054a7dc0> | keyword[def] identifier[load_resource_module] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[name] = literal[string] . identifier[format] ( identifier[self] . identifier[name] , literal[string] )
identifier[self] . identifier[dependencies_module] = identifier[importlib] . identifier[import_module] ( identifier[name] )
keyword[except] identifier[ModuleNotFoundError] keyword[as] identifier[err] :
keyword[raise] identifier[EffectError] (
(
literal[string]
literal[string]
). identifier[format] ( identifier[self] . identifier[name] , identifier[err] ))
keyword[try] :
identifier[self] . identifier[resources] = identifier[getattr] ( identifier[self] . identifier[dependencies_module] , literal[string] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[EffectError] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[resources] , identifier[list] ):
keyword[raise] identifier[EffectError] (
literal[string] . identifier[format] (
identifier[name] , identifier[type] ( identifier[self] . identifier[resources] )))
keyword[try] :
identifier[self] . identifier[effect_packages] = identifier[getattr] ( identifier[self] . identifier[dependencies_module] , literal[string] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[EffectError] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[effect_packages] , identifier[list] ):
keyword[raise] identifier[EffectError] (
literal[string] . identifier[format] (
identifier[name] , identifier[type] ( identifier[self] . identifier[effect_packages] )))
"""Fetch the resource list"""
# Attempt to load the dependencies module
try:
name = '{}.{}'.format(self.name, 'dependencies')
self.dependencies_module = importlib.import_module(name) # depends on [control=['try'], data=[]]
except ModuleNotFoundError as err:
raise EffectError("Effect package '{}' has no 'dependencies' module or the module has errors. Forwarded error from importlib: {}".format(self.name, err)) # depends on [control=['except'], data=['err']]
# Fetch the resource descriptions
try:
self.resources = getattr(self.dependencies_module, 'resources') # depends on [control=['try'], data=[]]
except AttributeError:
raise EffectError("Effect dependencies module '{}' has no 'resources' attribute".format(name)) # depends on [control=['except'], data=[]]
if not isinstance(self.resources, list):
raise EffectError("Effect dependencies module '{}': 'resources' is of type {} instead of a list".format(name, type(self.resources))) # depends on [control=['if'], data=[]]
# Fetch the effect class list
try:
self.effect_packages = getattr(self.dependencies_module, 'effect_packages') # depends on [control=['try'], data=[]]
except AttributeError:
raise EffectError("Effect dependencies module '{}' has 'effect_packages' attribute".format(name)) # depends on [control=['except'], data=[]]
if not isinstance(self.effect_packages, list):
raise EffectError("Effect dependencies module '{}': 'effect_packages' is of type {} instead of a list".format(name, type(self.effects))) # depends on [control=['if'], data=[]] |