code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
---|---|---|---|
def periods(dts, phi=0.0):
    """For an ensemble of oscillators, return the set of periods lengths of
    all successive oscillations of all oscillators.
    An individual oscillation is defined to start and end when the phase
    passes phi (by default zero) after completing a full cycle.
    If the timeseries of an oscillator phase begins (or ends) exactly at phi,
    then the first (or last) oscillation will be included.
    Arguments:
      dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
        representing phase) and axis 2 ranges over multiple realizations of
        the oscillator.
      phi=0.0: float
        A single oscillation starts and ends at phase phi (by default zero).
    """
    # Vectorize the single-timeseries analysis so it is applied across every
    # oscillator realization in the (possibly distributed) ensemble.
    vperiods = distob.vectorize(analyses1.periods)
    all_periods = vperiods(dts, phi)
    # A local array-like result (anything exposing __array_interface__) can be
    # flattened directly; otherwise each element is a remote per-oscillator
    # list of periods that must be gathered before stacking into one flat
    # array of period lengths.
    if hasattr(type(all_periods), '__array_interface__'):
        return np.ravel(all_periods)
    else:
        return np.hstack([distob.gather(plist) for plist in all_periods]) | def function[periods, parameter[dts, phi]]:
constant[For an ensemble of oscillators, return the set of periods lengths of
all successive oscillations of all oscillators.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries of an oscillator phase begins (or ends) exactly at phi,
then the first (or last) oscillation will be included.
Arguments:
dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
representing phase) and axis 2 ranges over multiple realizations of
the oscillator.
phi=0.0: float
A single oscillation starts and ends at phase phi (by default zero).
]
variable[vperiods] assign[=] call[name[distob].vectorize, parameter[name[analyses1].periods]]
variable[all_periods] assign[=] call[name[vperiods], parameter[name[dts], name[phi]]]
if call[name[hasattr], parameter[call[name[type], parameter[name[all_periods]]], constant[__array_interface__]]] begin[:]
return[call[name[np].ravel, parameter[name[all_periods]]]] | keyword[def] identifier[periods] ( identifier[dts] , identifier[phi] = literal[int] ):
literal[string]
identifier[vperiods] = identifier[distob] . identifier[vectorize] ( identifier[analyses1] . identifier[periods] )
identifier[all_periods] = identifier[vperiods] ( identifier[dts] , identifier[phi] )
keyword[if] identifier[hasattr] ( identifier[type] ( identifier[all_periods] ), literal[string] ):
keyword[return] identifier[np] . identifier[ravel] ( identifier[all_periods] )
keyword[else] :
keyword[return] identifier[np] . identifier[hstack] ([ identifier[distob] . identifier[gather] ( identifier[plist] ) keyword[for] identifier[plist] keyword[in] identifier[all_periods] ]) | def periods(dts, phi=0.0):
"""For an ensemble of oscillators, return the set of periods lengths of
all successive oscillations of all oscillators.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries of an oscillator phase begins (or ends) exactly at phi,
then the first (or last) oscillation will be included.
Arguments:
dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
representing phase) and axis 2 ranges over multiple realizations of
the oscillator.
phi=0.0: float
A single oscillation starts and ends at phase phi (by default zero).
"""
vperiods = distob.vectorize(analyses1.periods)
all_periods = vperiods(dts, phi)
if hasattr(type(all_periods), '__array_interface__'):
return np.ravel(all_periods) # depends on [control=['if'], data=[]]
else:
return np.hstack([distob.gather(plist) for plist in all_periods]) |
def ser_a_trous(C0, filter, scale):
    """
    The following is a serial implementation of the a trous algorithm. Accepts the following parameters:
    INPUTS:
    filter (no default): The filter-bank which is applied to the components of the transform.
    C0 (no default): The current array on which filtering is to be performed.
    scale (no default): The scale for which the decomposition is being carried out.
    OUTPUTS:
    C1 The result of applying the a trous algorithm to the input.
    """
    # Separable 5-tap "with holes" convolution: at this scale the filter taps
    # are spaced 2**scale and 2**(scale+1) rows/columns apart.
    # First pass: filter along axis 0 into tmp. The reversed slices
    # (e.g. C0[(2**(scale+1))-1::-1,:]) supply mirror-reflected samples for
    # taps that fall off the array edge.
    tmp = filter[2]*C0
    tmp[(2**(scale+1)):,:] += filter[0]*C0[:-(2**(scale+1)),:]
    tmp[:(2**(scale+1)),:] += filter[0]*C0[(2**(scale+1))-1::-1,:]
    tmp[(2**scale):,:] += filter[1]*C0[:-(2**scale),:]
    tmp[:(2**scale),:] += filter[1]*C0[(2**scale)-1::-1,:]
    tmp[:-(2**scale),:] += filter[3]*C0[(2**scale):,:]
    tmp[-(2**scale):,:] += filter[3]*C0[:-(2**scale)-1:-1,:]
    tmp[:-(2**(scale+1)),:] += filter[4]*C0[(2**(scale+1)):,:]
    tmp[-(2**(scale+1)):,:] += filter[4]*C0[:-(2**(scale+1))-1:-1,:]
    # Second pass: apply the same spaced filter along axis 1, again with
    # mirrored boundary samples, producing the smoothed output C1.
    C1 = filter[2]*tmp
    C1[:,(2**(scale+1)):] += filter[0]*tmp[:,:-(2**(scale+1))]
    C1[:,:(2**(scale+1))] += filter[0]*tmp[:,(2**(scale+1))-1::-1]
    C1[:,(2**scale):] += filter[1]*tmp[:,:-(2**scale)]
    C1[:,:(2**scale)] += filter[1]*tmp[:,(2**scale)-1::-1]
    C1[:,:-(2**scale)] += filter[3]*tmp[:,(2**scale):]
    C1[:,-(2**scale):] += filter[3]*tmp[:,:-(2**scale)-1:-1]
    C1[:,:-(2**(scale+1))] += filter[4]*tmp[:,(2**(scale+1)):]
    C1[:,-(2**(scale+1)):] += filter[4]*tmp[:,:-(2**(scale+1))-1:-1]
    return C1 | def function[ser_a_trous, parameter[C0, filter, scale]]:
constant[
The following is a serial implementation of the a trous algorithm. Accepts the following parameters:
INPUTS:
filter (no default): The filter-bank which is applied to the components of the transform.
C0 (no default): The current array on which filtering is to be performed.
scale (no default): The scale for which the decomposition is being carried out.
OUTPUTS:
C1 The result of applying the a trous algorithm to the input.
]
variable[tmp] assign[=] binary_operation[call[name[filter]][constant[2]] * name[C0]]
<ast.AugAssign object at 0x7da1b25d84c0>
<ast.AugAssign object at 0x7da1b25dbc40>
<ast.AugAssign object at 0x7da1b25db2b0>
<ast.AugAssign object at 0x7da1b25dbe50>
<ast.AugAssign object at 0x7da1b25d8130>
<ast.AugAssign object at 0x7da1b25d9ea0>
<ast.AugAssign object at 0x7da1b25da560>
<ast.AugAssign object at 0x7da1b25dbd60>
variable[C1] assign[=] binary_operation[call[name[filter]][constant[2]] * name[tmp]]
<ast.AugAssign object at 0x7da1b25d8d30>
<ast.AugAssign object at 0x7da1b25d9ab0>
<ast.AugAssign object at 0x7da1b25db070>
<ast.AugAssign object at 0x7da1b258b6a0>
<ast.AugAssign object at 0x7da1b2589930>
<ast.AugAssign object at 0x7da1b25882b0>
<ast.AugAssign object at 0x7da1b25897e0>
<ast.AugAssign object at 0x7da1b258ace0>
return[name[C1]] | keyword[def] identifier[ser_a_trous] ( identifier[C0] , identifier[filter] , identifier[scale] ):
literal[string]
identifier[tmp] = identifier[filter] [ literal[int] ]* identifier[C0]
identifier[tmp] [( literal[int] **( identifier[scale] + literal[int] )):,:]+= identifier[filter] [ literal[int] ]* identifier[C0] [:-( literal[int] **( identifier[scale] + literal[int] )),:]
identifier[tmp] [:( literal[int] **( identifier[scale] + literal[int] )),:]+= identifier[filter] [ literal[int] ]* identifier[C0] [( literal[int] **( identifier[scale] + literal[int] ))- literal[int] ::- literal[int] ,:]
identifier[tmp] [( literal[int] ** identifier[scale] ):,:]+= identifier[filter] [ literal[int] ]* identifier[C0] [:-( literal[int] ** identifier[scale] ),:]
identifier[tmp] [:( literal[int] ** identifier[scale] ),:]+= identifier[filter] [ literal[int] ]* identifier[C0] [( literal[int] ** identifier[scale] )- literal[int] ::- literal[int] ,:]
identifier[tmp] [:-( literal[int] ** identifier[scale] ),:]+= identifier[filter] [ literal[int] ]* identifier[C0] [( literal[int] ** identifier[scale] ):,:]
identifier[tmp] [-( literal[int] ** identifier[scale] ):,:]+= identifier[filter] [ literal[int] ]* identifier[C0] [:-( literal[int] ** identifier[scale] )- literal[int] :- literal[int] ,:]
identifier[tmp] [:-( literal[int] **( identifier[scale] + literal[int] )),:]+= identifier[filter] [ literal[int] ]* identifier[C0] [( literal[int] **( identifier[scale] + literal[int] )):,:]
identifier[tmp] [-( literal[int] **( identifier[scale] + literal[int] )):,:]+= identifier[filter] [ literal[int] ]* identifier[C0] [:-( literal[int] **( identifier[scale] + literal[int] ))- literal[int] :- literal[int] ,:]
identifier[C1] = identifier[filter] [ literal[int] ]* identifier[tmp]
identifier[C1] [:,( literal[int] **( identifier[scale] + literal[int] )):]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,:-( literal[int] **( identifier[scale] + literal[int] ))]
identifier[C1] [:,:( literal[int] **( identifier[scale] + literal[int] ))]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,( literal[int] **( identifier[scale] + literal[int] ))- literal[int] ::- literal[int] ]
identifier[C1] [:,( literal[int] ** identifier[scale] ):]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,:-( literal[int] ** identifier[scale] )]
identifier[C1] [:,:( literal[int] ** identifier[scale] )]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,( literal[int] ** identifier[scale] )- literal[int] ::- literal[int] ]
identifier[C1] [:,:-( literal[int] ** identifier[scale] )]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,( literal[int] ** identifier[scale] ):]
identifier[C1] [:,-( literal[int] ** identifier[scale] ):]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,:-( literal[int] ** identifier[scale] )- literal[int] :- literal[int] ]
identifier[C1] [:,:-( literal[int] **( identifier[scale] + literal[int] ))]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,( literal[int] **( identifier[scale] + literal[int] )):]
identifier[C1] [:,-( literal[int] **( identifier[scale] + literal[int] )):]+= identifier[filter] [ literal[int] ]* identifier[tmp] [:,:-( literal[int] **( identifier[scale] + literal[int] ))- literal[int] :- literal[int] ]
keyword[return] identifier[C1] | def ser_a_trous(C0, filter, scale):
"""
The following is a serial implementation of the a trous algorithm. Accepts the following parameters:
INPUTS:
filter (no default): The filter-bank which is applied to the components of the transform.
C0 (no default): The current array on which filtering is to be performed.
scale (no default): The scale for which the decomposition is being carried out.
OUTPUTS:
C1 The result of applying the a trous algorithm to the input.
"""
tmp = filter[2] * C0
tmp[2 ** (scale + 1):, :] += filter[0] * C0[:-2 ** (scale + 1), :]
tmp[:2 ** (scale + 1), :] += filter[0] * C0[2 ** (scale + 1) - 1::-1, :]
tmp[2 ** scale:, :] += filter[1] * C0[:-2 ** scale, :]
tmp[:2 ** scale, :] += filter[1] * C0[2 ** scale - 1::-1, :]
tmp[:-2 ** scale, :] += filter[3] * C0[2 ** scale:, :]
tmp[-2 ** scale:, :] += filter[3] * C0[:-2 ** scale - 1:-1, :]
tmp[:-2 ** (scale + 1), :] += filter[4] * C0[2 ** (scale + 1):, :]
tmp[-2 ** (scale + 1):, :] += filter[4] * C0[:-2 ** (scale + 1) - 1:-1, :]
C1 = filter[2] * tmp
C1[:, 2 ** (scale + 1):] += filter[0] * tmp[:, :-2 ** (scale + 1)]
C1[:, :2 ** (scale + 1)] += filter[0] * tmp[:, 2 ** (scale + 1) - 1::-1]
C1[:, 2 ** scale:] += filter[1] * tmp[:, :-2 ** scale]
C1[:, :2 ** scale] += filter[1] * tmp[:, 2 ** scale - 1::-1]
C1[:, :-2 ** scale] += filter[3] * tmp[:, 2 ** scale:]
C1[:, -2 ** scale:] += filter[3] * tmp[:, :-2 ** scale - 1:-1]
C1[:, :-2 ** (scale + 1)] += filter[4] * tmp[:, 2 ** (scale + 1):]
C1[:, -2 ** (scale + 1):] += filter[4] * tmp[:, :-2 ** (scale + 1) - 1:-1]
return C1 |
def cache_entry(self):
    """
    Returns a CacheEntry instance for File.

    Raises:
        ValueError: if this File has no storage_path (temporary file),
            since a lal cache entry needs a concrete on-disk location.
    """
    # A temporary file has no storage_path, so there is no URL to record
    # in a lal cache.
    if self.storage_path is None:
        raise ValueError('This file is temporary and so a lal '
                         'cache entry cannot be made')
    # Build a file://localhost/... URL pointing at the stored file.
    file_url = urlparse.urlunparse(['file', 'localhost', self.storage_path, None,
                                    None, None])
    cache_entry = lal.utils.CacheEntry(self.ifo_string,
        self.tagged_description, self.segment_list.extent(), file_url)
    # Back-reference so consumers of the cache entry can recover this File.
    cache_entry.workflow_file = self
    return cache_entry | def function[cache_entry, parameter[self]]:
constant[
Returns a CacheEntry instance for File.
]
if compare[name[self].storage_path is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc99b40>
variable[file_url] assign[=] call[name[urlparse].urlunparse, parameter[list[[<ast.Constant object at 0x7da18dc988e0>, <ast.Constant object at 0x7da18dc98760>, <ast.Attribute object at 0x7da18dc9a470>, <ast.Constant object at 0x7da18dc9b670>, <ast.Constant object at 0x7da18dc99480>, <ast.Constant object at 0x7da18dc986d0>]]]]
variable[cache_entry] assign[=] call[name[lal].utils.CacheEntry, parameter[name[self].ifo_string, name[self].tagged_description, call[name[self].segment_list.extent, parameter[]], name[file_url]]]
name[cache_entry].workflow_file assign[=] name[self]
return[name[cache_entry]] | keyword[def] identifier[cache_entry] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[storage_path] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[file_url] = identifier[urlparse] . identifier[urlunparse] ([ literal[string] , literal[string] , identifier[self] . identifier[storage_path] , keyword[None] ,
keyword[None] , keyword[None] ])
identifier[cache_entry] = identifier[lal] . identifier[utils] . identifier[CacheEntry] ( identifier[self] . identifier[ifo_string] ,
identifier[self] . identifier[tagged_description] , identifier[self] . identifier[segment_list] . identifier[extent] (), identifier[file_url] )
identifier[cache_entry] . identifier[workflow_file] = identifier[self]
keyword[return] identifier[cache_entry] | def cache_entry(self):
"""
Returns a CacheEntry instance for File.
"""
if self.storage_path is None:
raise ValueError('This file is temporary and so a lal cache entry cannot be made') # depends on [control=['if'], data=[]]
file_url = urlparse.urlunparse(['file', 'localhost', self.storage_path, None, None, None])
cache_entry = lal.utils.CacheEntry(self.ifo_string, self.tagged_description, self.segment_list.extent(), file_url)
cache_entry.workflow_file = self
return cache_entry |
def get_daemon_stats(self, details=False):
    """Increase the stats provided by the Daemon base class

    :param details: if True, request detailed statistics from the scheduler
    :type details: bool
    :return: stats dictionary
    :rtype: dict
    """
    # Call the base Daemon one
    res = super(Alignak, self).get_daemon_stats(details=details)
    res.update({'name': self.name, 'type': self.type, 'monitored_objects': {}})
    counters = res['counters']
    # Satellites counters
    counters['brokers'] = len(self.brokers)
    counters['pollers'] = len(self.pollers)
    counters['reactionners'] = len(self.reactionners)
    counters['receivers'] = len(self.receivers)
    # No scheduler attached yet: only the satellite counters are available.
    if not self.sched:
        return res
    # # Hosts/services problems counters
    # m_solver = MacroResolver()
    # counters['hosts_problems'] = m_solver._get_total_host_problems()
    # counters['hosts_unhandled_problems'] = m_solver._get_total_host_problems_unhandled()
    # counters['services_problems'] = m_solver._get_total_service_problems()
    # counters['services_unhandled_problems'] = m_solver._get_total_service_problems_unhandled()
    # Get statistics from the scheduler
    scheduler_stats = self.sched.get_scheduler_stats(details=details)
    # Merge the scheduler's counters into ours, then fold its remaining
    # statistics in at the top level (counters removed to avoid clobbering).
    res['counters'].update(scheduler_stats['counters'])
    scheduler_stats.pop('counters')
    res.update(scheduler_stats)
    return res | def function[get_daemon_stats, parameter[self, details]]:
constant[Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
]
variable[res] assign[=] call[call[name[super], parameter[name[Alignak], name[self]]].get_daemon_stats, parameter[]]
call[name[res].update, parameter[dictionary[[<ast.Constant object at 0x7da1b26ad000>, <ast.Constant object at 0x7da1b26ad7e0>, <ast.Constant object at 0x7da1b26ad750>], [<ast.Attribute object at 0x7da1b26adcf0>, <ast.Attribute object at 0x7da1b26ad090>, <ast.Dict object at 0x7da1b26acb50>]]]]
variable[counters] assign[=] call[name[res]][constant[counters]]
call[name[counters]][constant[brokers]] assign[=] call[name[len], parameter[name[self].brokers]]
call[name[counters]][constant[pollers]] assign[=] call[name[len], parameter[name[self].pollers]]
call[name[counters]][constant[reactionners]] assign[=] call[name[len], parameter[name[self].reactionners]]
call[name[counters]][constant[receivers]] assign[=] call[name[len], parameter[name[self].receivers]]
if <ast.UnaryOp object at 0x7da1b26ad030> begin[:]
return[name[res]]
variable[scheduler_stats] assign[=] call[name[self].sched.get_scheduler_stats, parameter[]]
call[call[name[res]][constant[counters]].update, parameter[call[name[scheduler_stats]][constant[counters]]]]
call[name[scheduler_stats].pop, parameter[constant[counters]]]
call[name[res].update, parameter[name[scheduler_stats]]]
return[name[res]] | keyword[def] identifier[get_daemon_stats] ( identifier[self] , identifier[details] = keyword[False] ):
literal[string]
identifier[res] = identifier[super] ( identifier[Alignak] , identifier[self] ). identifier[get_daemon_stats] ( identifier[details] = identifier[details] )
identifier[res] . identifier[update] ({ literal[string] : identifier[self] . identifier[name] , literal[string] : identifier[self] . identifier[type] , literal[string] :{}})
identifier[counters] = identifier[res] [ literal[string] ]
identifier[counters] [ literal[string] ]= identifier[len] ( identifier[self] . identifier[brokers] )
identifier[counters] [ literal[string] ]= identifier[len] ( identifier[self] . identifier[pollers] )
identifier[counters] [ literal[string] ]= identifier[len] ( identifier[self] . identifier[reactionners] )
identifier[counters] [ literal[string] ]= identifier[len] ( identifier[self] . identifier[receivers] )
keyword[if] keyword[not] identifier[self] . identifier[sched] :
keyword[return] identifier[res]
identifier[scheduler_stats] = identifier[self] . identifier[sched] . identifier[get_scheduler_stats] ( identifier[details] = identifier[details] )
identifier[res] [ literal[string] ]. identifier[update] ( identifier[scheduler_stats] [ literal[string] ])
identifier[scheduler_stats] . identifier[pop] ( literal[string] )
identifier[res] . identifier[update] ( identifier[scheduler_stats] )
keyword[return] identifier[res] | def get_daemon_stats(self, details=False):
"""Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
"""
# Call the base Daemon one
res = super(Alignak, self).get_daemon_stats(details=details)
res.update({'name': self.name, 'type': self.type, 'monitored_objects': {}})
counters = res['counters']
# Satellites counters
counters['brokers'] = len(self.brokers)
counters['pollers'] = len(self.pollers)
counters['reactionners'] = len(self.reactionners)
counters['receivers'] = len(self.receivers)
if not self.sched:
return res # depends on [control=['if'], data=[]]
# # Hosts/services problems counters
# m_solver = MacroResolver()
# counters['hosts_problems'] = m_solver._get_total_host_problems()
# counters['hosts_unhandled_problems'] = m_solver._get_total_host_problems_unhandled()
# counters['services_problems'] = m_solver._get_total_service_problems()
# counters['services_unhandled_problems'] = m_solver._get_total_service_problems_unhandled()
# Get statistics from the scheduler
scheduler_stats = self.sched.get_scheduler_stats(details=details)
res['counters'].update(scheduler_stats['counters'])
scheduler_stats.pop('counters')
res.update(scheduler_stats)
return res |
def _prepare_output(partitions, verbose):
    """Returns dict with 'raw' and 'message' keys filled.

    partitions is an iterable of (topic, partition) pairs describing the
    offline partitions; verbose controls whether the full listing is
    included (both as text and as structured 'raw' data).
    """
    out = {}
    partitions_count = len(partitions)
    out['raw'] = {
        'offline_count': partitions_count,
    }
    if partitions_count == 0:
        out['message'] = 'No offline partitions.'
    else:
        out['message'] = "{count} offline partitions.".format(count=partitions_count)
        if verbose:
            lines = (
                '{}:{}'.format(topic, partition)
                for (topic, partition) in partitions
            )
            out['verbose'] = "Partitions:\n" + "\n".join(lines)
        else:
            # Not verbose: tell the operator how to re-run this same check
            # with -v inserted into our own command line.
            cmdline = sys.argv[:]
            cmdline.insert(1, '-v')
            out['message'] += '\nTo see all offline partitions run: ' + ' '.join(cmdline)
    # Structured per-partition data is only attached in verbose mode.
    if verbose:
        out['raw']['partitions'] = [
            {'topic': topic, 'partition': partition}
            for (topic, partition) in partitions
        ]
    return out | def function[_prepare_output, parameter[partitions, verbose]]:
constant[Returns dict with 'raw' and 'message' keys filled.]
variable[out] assign[=] dictionary[[], []]
variable[partitions_count] assign[=] call[name[len], parameter[name[partitions]]]
call[name[out]][constant[raw]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0833730>], [<ast.Name object at 0x7da1b08306d0>]]
if compare[name[partitions_count] equal[==] constant[0]] begin[:]
call[name[out]][constant[message]] assign[=] constant[No offline partitions.]
if name[verbose] begin[:]
call[call[name[out]][constant[raw]]][constant[partitions]] assign[=] <ast.ListComp object at 0x7da1b07afb20>
return[name[out]] | keyword[def] identifier[_prepare_output] ( identifier[partitions] , identifier[verbose] ):
literal[string]
identifier[out] ={}
identifier[partitions_count] = identifier[len] ( identifier[partitions] )
identifier[out] [ literal[string] ]={
literal[string] : identifier[partitions_count] ,
}
keyword[if] identifier[partitions_count] == literal[int] :
identifier[out] [ literal[string] ]= literal[string]
keyword[else] :
identifier[out] [ literal[string] ]= literal[string] . identifier[format] ( identifier[count] = identifier[partitions_count] )
keyword[if] identifier[verbose] :
identifier[lines] =(
literal[string] . identifier[format] ( identifier[topic] , identifier[partition] )
keyword[for] ( identifier[topic] , identifier[partition] ) keyword[in] identifier[partitions]
)
identifier[out] [ literal[string] ]= literal[string] + literal[string] . identifier[join] ( identifier[lines] )
keyword[else] :
identifier[cmdline] = identifier[sys] . identifier[argv] [:]
identifier[cmdline] . identifier[insert] ( literal[int] , literal[string] )
identifier[out] [ literal[string] ]+= literal[string] + literal[string] . identifier[join] ( identifier[cmdline] )
keyword[if] identifier[verbose] :
identifier[out] [ literal[string] ][ literal[string] ]=[
{ literal[string] : identifier[topic] , literal[string] : identifier[partition] }
keyword[for] ( identifier[topic] , identifier[partition] ) keyword[in] identifier[partitions]
]
keyword[return] identifier[out] | def _prepare_output(partitions, verbose):
"""Returns dict with 'raw' and 'message' keys filled."""
out = {}
partitions_count = len(partitions)
out['raw'] = {'offline_count': partitions_count}
if partitions_count == 0:
out['message'] = 'No offline partitions.' # depends on [control=['if'], data=[]]
else:
out['message'] = '{count} offline partitions.'.format(count=partitions_count)
if verbose:
lines = ('{}:{}'.format(topic, partition) for (topic, partition) in partitions)
out['verbose'] = 'Partitions:\n' + '\n'.join(lines) # depends on [control=['if'], data=[]]
else:
cmdline = sys.argv[:]
cmdline.insert(1, '-v')
out['message'] += '\nTo see all offline partitions run: ' + ' '.join(cmdline)
if verbose:
out['raw']['partitions'] = [{'topic': topic, 'partition': partition} for (topic, partition) in partitions] # depends on [control=['if'], data=[]]
return out |
def connect(host, port=None, **kwargs):
    '''
    Test connectivity to a host using a particular
    port from the minion.
    .. versionadded:: 2016.3.0
    CLI Example:
    .. code-block:: bash
        salt '*' network.connect archlinux.org 80
        salt '*' network.connect archlinux.org 80 timeout=3
        salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4
        salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3
    '''
    ret = {'result': None,
           'comment': ''}
    # Both host and port are mandatory; fail early with a clear comment.
    if not host:
        ret['result'] = False
        ret['comment'] = 'Required argument, host, is missing.'
        return ret
    if not port:
        ret['result'] = False
        ret['comment'] = 'Required argument, port, is missing.'
        return ret
    proto = kwargs.get('proto', 'tcp')
    timeout = kwargs.get('timeout', 5)
    family = kwargs.get('family', None)
    # Literal IP addresses are used as-is; hostnames are sanitized first.
    if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host):
        address = host
    else:
        address = '{0}'.format(salt.utils.network.sanitize_host(host))
    try:
        # Any unrecognised proto value falls back to TCP.
        if proto == 'udp':
            __proto = socket.SOL_UDP
        else:
            __proto = socket.SOL_TCP
            proto = 'tcp'
        # Map the requested address family; 0 lets getaddrinfo pick any.
        if family:
            if family == 'ipv4':
                __family = socket.AF_INET
            elif family == 'ipv6':
                __family = socket.AF_INET6
            else:
                __family = 0
        else:
            __family = 0
        # Resolve and take the first returned candidate address.
        (family,
         socktype,
         _proto,
         garbage,
         _address) = socket.getaddrinfo(address, port, __family, 0, __proto)[0]
        skt = socket.socket(family, socktype, _proto)
        skt.settimeout(timeout)
        if proto == 'udp':
            # Generate a random string of a
            # decent size to test UDP connection
            # NOTE(review): hashlib.update() and sendto() require bytes on
            # Python 3 -- this str-based code looks Python-2-only; confirm.
            md5h = hashlib.md5()
            md5h.update(datetime.datetime.now().strftime('%s'))
            msg = md5h.hexdigest()
            skt.sendto(msg, _address)
            # UDP has no handshake: success means we got *some* reply back.
            recv, svr = skt.recvfrom(255)
            skt.close()
        else:
            skt.connect(_address)
            skt.shutdown(2)
    except Exception as exc:
        ret['result'] = False
        # NOTE(review): if getaddrinfo itself raised, _address is unbound
        # here and this raises NameError, masking the real failure; also
        # `exc` is captured but never reported -- verify intent.
        ret['comment'] = 'Unable to connect to {0} ({1}) on {2} port {3}'\
            .format(host, _address[0], proto, port)
        return ret
    ret['result'] = True
    ret['comment'] = 'Successfully connected to {0} ({1}) on {2} port {3}'\
        .format(host, _address[0], proto, port)
    return ret | def function[connect, parameter[host, port]]:
constant[
Test connectivity to a host using a particular
port from the minion.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' network.connect archlinux.org 80
salt '*' network.connect archlinux.org 80 timeout=3
salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4
salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2046219f0>, <ast.Constant object at 0x7da204621c00>], [<ast.Constant object at 0x7da204621210>, <ast.Constant object at 0x7da204623a00>]]
if <ast.UnaryOp object at 0x7da204622410> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] constant[Required argument, host, is missing.]
return[name[ret]]
if <ast.UnaryOp object at 0x7da204621420> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] constant[Required argument, port, is missing.]
return[name[ret]]
variable[proto] assign[=] call[name[kwargs].get, parameter[constant[proto], constant[tcp]]]
variable[timeout] assign[=] call[name[kwargs].get, parameter[constant[timeout], constant[5]]]
variable[family] assign[=] call[name[kwargs].get, parameter[constant[family], constant[None]]]
if <ast.BoolOp object at 0x7da2046213f0> begin[:]
variable[address] assign[=] name[host]
<ast.Try object at 0x7da2054a5db0>
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Successfully connected to {0} ({1}) on {2} port {3}].format, parameter[name[host], call[name[_address]][constant[0]], name[proto], name[port]]]
return[name[ret]] | keyword[def] identifier[connect] ( identifier[host] , identifier[port] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : keyword[None] ,
literal[string] : literal[string] }
keyword[if] keyword[not] identifier[host] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[port] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[proto] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[timeout] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[family] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[salt] . identifier[utils] . identifier[validate] . identifier[net] . identifier[ipv4_addr] ( identifier[host] ) keyword[or] identifier[salt] . identifier[utils] . identifier[validate] . identifier[net] . identifier[ipv6_addr] ( identifier[host] ):
identifier[address] = identifier[host]
keyword[else] :
identifier[address] = literal[string] . identifier[format] ( identifier[salt] . identifier[utils] . identifier[network] . identifier[sanitize_host] ( identifier[host] ))
keyword[try] :
keyword[if] identifier[proto] == literal[string] :
identifier[__proto] = identifier[socket] . identifier[SOL_UDP]
keyword[else] :
identifier[__proto] = identifier[socket] . identifier[SOL_TCP]
identifier[proto] = literal[string]
keyword[if] identifier[family] :
keyword[if] identifier[family] == literal[string] :
identifier[__family] = identifier[socket] . identifier[AF_INET]
keyword[elif] identifier[family] == literal[string] :
identifier[__family] = identifier[socket] . identifier[AF_INET6]
keyword[else] :
identifier[__family] = literal[int]
keyword[else] :
identifier[__family] = literal[int]
( identifier[family] ,
identifier[socktype] ,
identifier[_proto] ,
identifier[garbage] ,
identifier[_address] )= identifier[socket] . identifier[getaddrinfo] ( identifier[address] , identifier[port] , identifier[__family] , literal[int] , identifier[__proto] )[ literal[int] ]
identifier[skt] = identifier[socket] . identifier[socket] ( identifier[family] , identifier[socktype] , identifier[_proto] )
identifier[skt] . identifier[settimeout] ( identifier[timeout] )
keyword[if] identifier[proto] == literal[string] :
identifier[md5h] = identifier[hashlib] . identifier[md5] ()
identifier[md5h] . identifier[update] ( identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] ))
identifier[msg] = identifier[md5h] . identifier[hexdigest] ()
identifier[skt] . identifier[sendto] ( identifier[msg] , identifier[_address] )
identifier[recv] , identifier[svr] = identifier[skt] . identifier[recvfrom] ( literal[int] )
identifier[skt] . identifier[close] ()
keyword[else] :
identifier[skt] . identifier[connect] ( identifier[_address] )
identifier[skt] . identifier[shutdown] ( literal[int] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[host] , identifier[_address] [ literal[int] ], identifier[proto] , identifier[port] )
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[host] , identifier[_address] [ literal[int] ], identifier[proto] , identifier[port] )
keyword[return] identifier[ret] | def connect(host, port=None, **kwargs):
"""
Test connectivity to a host using a particular
port from the minion.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' network.connect archlinux.org 80
salt '*' network.connect archlinux.org 80 timeout=3
salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4
salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3
"""
ret = {'result': None, 'comment': ''}
if not host:
ret['result'] = False
ret['comment'] = 'Required argument, host, is missing.'
return ret # depends on [control=['if'], data=[]]
if not port:
ret['result'] = False
ret['comment'] = 'Required argument, port, is missing.'
return ret # depends on [control=['if'], data=[]]
proto = kwargs.get('proto', 'tcp')
timeout = kwargs.get('timeout', 5)
family = kwargs.get('family', None)
if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host):
address = host # depends on [control=['if'], data=[]]
else:
address = '{0}'.format(salt.utils.network.sanitize_host(host))
try:
if proto == 'udp':
__proto = socket.SOL_UDP # depends on [control=['if'], data=[]]
else:
__proto = socket.SOL_TCP
proto = 'tcp'
if family:
if family == 'ipv4':
__family = socket.AF_INET # depends on [control=['if'], data=[]]
elif family == 'ipv6':
__family = socket.AF_INET6 # depends on [control=['if'], data=[]]
else:
__family = 0 # depends on [control=['if'], data=[]]
else:
__family = 0
(family, socktype, _proto, garbage, _address) = socket.getaddrinfo(address, port, __family, 0, __proto)[0]
skt = socket.socket(family, socktype, _proto)
skt.settimeout(timeout)
if proto == 'udp':
# Generate a random string of a
# decent size to test UDP connection
md5h = hashlib.md5()
md5h.update(datetime.datetime.now().strftime('%s'))
msg = md5h.hexdigest()
skt.sendto(msg, _address)
(recv, svr) = skt.recvfrom(255)
skt.close() # depends on [control=['if'], data=[]]
else:
skt.connect(_address)
skt.shutdown(2) # depends on [control=['try'], data=[]]
except Exception as exc:
ret['result'] = False
ret['comment'] = 'Unable to connect to {0} ({1}) on {2} port {3}'.format(host, _address[0], proto, port)
return ret # depends on [control=['except'], data=[]]
ret['result'] = True
ret['comment'] = 'Successfully connected to {0} ({1}) on {2} port {3}'.format(host, _address[0], proto, port)
return ret |
def operate(self, point):
    """
    Apply the operation on a point.
    Args:
        point: Cartesian coordinate.
    Returns:
        Coordinates of point after operation.
    """
    # Lift the Cartesian point into homogeneous coordinates so the 4x4
    # affine matrix (rotation + translation) applies in one product.
    homogeneous = np.array([point[0], point[1], point[2], 1])
    transformed = np.dot(self.affine_matrix, homogeneous)
    # Drop the homogeneous component before returning.
    return transformed[0:3]
constant[
Apply the operation on a point.
Args:
point: Cartesian coordinate.
Returns:
Coordinates of point after operation.
]
variable[affine_point] assign[=] call[name[np].array, parameter[list[[<ast.Subscript object at 0x7da18bc71f90>, <ast.Subscript object at 0x7da18bc70520>, <ast.Subscript object at 0x7da18bc70130>, <ast.Constant object at 0x7da18bc70400>]]]]
return[call[call[name[np].dot, parameter[name[self].affine_matrix, name[affine_point]]]][<ast.Slice object at 0x7da18bc72e30>]] | keyword[def] identifier[operate] ( identifier[self] , identifier[point] ):
literal[string]
identifier[affine_point] = identifier[np] . identifier[array] ([ identifier[point] [ literal[int] ], identifier[point] [ literal[int] ], identifier[point] [ literal[int] ], literal[int] ])
keyword[return] identifier[np] . identifier[dot] ( identifier[self] . identifier[affine_matrix] , identifier[affine_point] )[ literal[int] : literal[int] ] | def operate(self, point):
"""
Apply the operation on a point.
Args:
point: Cartesian coordinate.
Returns:
Coordinates of point after operation.
"""
affine_point = np.array([point[0], point[1], point[2], 1])
return np.dot(self.affine_matrix, affine_point)[0:3] |
def render(self, bindings):
    """Renders a string from a path template using the provided bindings.
    Args:
        bindings (dict): A dictionary of var names to binding strings.
    Returns:
        str: The rendered instantiation of this path template.
    Raises:
        ValidationError: If a key isn't provided or if a sub-template can't
            be parsed.
    """
    rendered_segments = []
    inside_binding = False
    for segment in self.segments:
        if segment.kind == _BINDING:
            if segment.literal not in bindings:
                raise ValidationException(
                    ('rendering error: value for key \'{}\' '
                     'not provided').format(segment.literal))
            # Splice the bound value in as a parsed sub-template.
            rendered_segments.extend(
                PathTemplate(bindings[segment.literal]).segments)
            inside_binding = True
        elif segment.kind == _END_BINDING:
            inside_binding = False
        elif not inside_binding:
            # Literal segments inside a binding span are replaced by the
            # bound value above, so only keep those outside a binding.
            rendered_segments.append(segment)
    path = _format(rendered_segments)
    # Re-match as a sanity check that the rendered path fits the template.
    self.match(path)
    return path
constant[Renders a string from a path template using the provided bindings.
Args:
bindings (dict): A dictionary of var names to binding strings.
Returns:
str: The rendered instantiation of this path template.
Raises:
ValidationError: If a key isn't provided or if a sub-template can't
be parsed.
]
variable[out] assign[=] list[[]]
variable[binding] assign[=] constant[False]
for taget[name[segment]] in starred[name[self].segments] begin[:]
if compare[name[segment].kind equal[==] name[_BINDING]] begin[:]
if compare[name[segment].literal <ast.NotIn object at 0x7da2590d7190> name[bindings]] begin[:]
<ast.Raise object at 0x7da18f722cb0>
call[name[out].extend, parameter[call[name[PathTemplate], parameter[call[name[bindings]][name[segment].literal]]].segments]]
variable[binding] assign[=] constant[True]
variable[path] assign[=] call[name[_format], parameter[name[out]]]
call[name[self].match, parameter[name[path]]]
return[name[path]] | keyword[def] identifier[render] ( identifier[self] , identifier[bindings] ):
literal[string]
identifier[out] =[]
identifier[binding] = keyword[False]
keyword[for] identifier[segment] keyword[in] identifier[self] . identifier[segments] :
keyword[if] identifier[segment] . identifier[kind] == identifier[_BINDING] :
keyword[if] identifier[segment] . identifier[literal] keyword[not] keyword[in] identifier[bindings] :
keyword[raise] identifier[ValidationException] (
( literal[string]
literal[string] ). identifier[format] ( identifier[segment] . identifier[literal] ))
identifier[out] . identifier[extend] ( identifier[PathTemplate] ( identifier[bindings] [ identifier[segment] . identifier[literal] ]). identifier[segments] )
identifier[binding] = keyword[True]
keyword[elif] identifier[segment] . identifier[kind] == identifier[_END_BINDING] :
identifier[binding] = keyword[False]
keyword[else] :
keyword[if] identifier[binding] :
keyword[continue]
identifier[out] . identifier[append] ( identifier[segment] )
identifier[path] = identifier[_format] ( identifier[out] )
identifier[self] . identifier[match] ( identifier[path] )
keyword[return] identifier[path] | def render(self, bindings):
"""Renders a string from a path template using the provided bindings.
Args:
bindings (dict): A dictionary of var names to binding strings.
Returns:
str: The rendered instantiation of this path template.
Raises:
ValidationError: If a key isn't provided or if a sub-template can't
be parsed.
"""
out = []
binding = False
for segment in self.segments:
if segment.kind == _BINDING:
if segment.literal not in bindings:
raise ValidationException("rendering error: value for key '{}' not provided".format(segment.literal)) # depends on [control=['if'], data=[]]
out.extend(PathTemplate(bindings[segment.literal]).segments)
binding = True # depends on [control=['if'], data=[]]
elif segment.kind == _END_BINDING:
binding = False # depends on [control=['if'], data=[]]
else:
if binding:
continue # depends on [control=['if'], data=[]]
out.append(segment) # depends on [control=['for'], data=['segment']]
path = _format(out)
self.match(path)
return path |
def _reverse_to_source(self, target, group1):
"""
Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
Set: A set of states for which there is a transition with the states of the group
"""
new_group = []
for dst in group1:
new_group += target[dst]
return set(new_group) | def function[_reverse_to_source, parameter[self, target, group1]]:
constant[
Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
Set: A set of states for which there is a transition with the states of the group
]
variable[new_group] assign[=] list[[]]
for taget[name[dst]] in starred[name[group1]] begin[:]
<ast.AugAssign object at 0x7da18f811540>
return[call[name[set], parameter[name[new_group]]]] | keyword[def] identifier[_reverse_to_source] ( identifier[self] , identifier[target] , identifier[group1] ):
literal[string]
identifier[new_group] =[]
keyword[for] identifier[dst] keyword[in] identifier[group1] :
identifier[new_group] += identifier[target] [ identifier[dst] ]
keyword[return] identifier[set] ( identifier[new_group] ) | def _reverse_to_source(self, target, group1):
"""
Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
Set: A set of states for which there is a transition with the states of the group
"""
new_group = []
for dst in group1:
new_group += target[dst] # depends on [control=['for'], data=['dst']]
return set(new_group) |
def mirtrace_rna_categories(self):
    """ Generate the miRTrace RNA Categories"""
    # Category order matters for the stacked bar plot, so build the
    # OrderedDict from an ordered spec table: (key, colour, display name).
    category_specs = [
        ('reads_mirna', '#33a02c', 'miRNA'),
        ('reads_rrna', '#ff7f00', 'rRNA'),
        ('reads_trna', '#1f78b4', 'tRNA'),
        ('reads_artifact', '#fb9a99', 'Artifact'),
        ('reads_unknown', '#d9d9d9', 'Unknown'),
    ]
    keys = OrderedDict(
        (key, {'color': color, 'name': name})
        for key, color, name in category_specs
    )
    # Config for the plot
    config = {
        'id': 'mirtrace_rna_categories_plot',
        'title': 'miRTrace: RNA Categories',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(self.summary_data, keys, config)
constant[ Generate the miRTrace RNA Categories]
variable[keys] assign[=] call[name[OrderedDict], parameter[]]
call[name[keys]][constant[reads_mirna]] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b07f0>, <ast.Constant object at 0x7da20e9b1d80>], [<ast.Constant object at 0x7da20e9b0fa0>, <ast.Constant object at 0x7da20e9b1f90>]]
call[name[keys]][constant[reads_rrna]] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b3940>, <ast.Constant object at 0x7da20e9b0730>], [<ast.Constant object at 0x7da20e9b12a0>, <ast.Constant object at 0x7da20e9b2f50>]]
call[name[keys]][constant[reads_trna]] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b13c0>, <ast.Constant object at 0x7da20e9b3340>], [<ast.Constant object at 0x7da20e9b28c0>, <ast.Constant object at 0x7da20e9b3160>]]
call[name[keys]][constant[reads_artifact]] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b2e90>, <ast.Constant object at 0x7da20e9b2cb0>], [<ast.Constant object at 0x7da20e9b06a0>, <ast.Constant object at 0x7da20e9b3760>]]
call[name[keys]][constant[reads_unknown]] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b21d0>, <ast.Constant object at 0x7da20e9b3eb0>], [<ast.Constant object at 0x7da20e9b04f0>, <ast.Constant object at 0x7da20e9b03a0>]]
variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b2b60>, <ast.Constant object at 0x7da20e9b16f0>, <ast.Constant object at 0x7da20e9b1810>, <ast.Constant object at 0x7da20e9b0a60>], [<ast.Constant object at 0x7da20e9b0fd0>, <ast.Constant object at 0x7da20e9b10f0>, <ast.Constant object at 0x7da20e9b1540>, <ast.Constant object at 0x7da20e9b2740>]]
return[call[name[bargraph].plot, parameter[name[self].summary_data, name[keys], name[config]]]] | keyword[def] identifier[mirtrace_rna_categories] ( identifier[self] ):
literal[string]
identifier[keys] = identifier[OrderedDict] ()
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[config] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[return] identifier[bargraph] . identifier[plot] ( identifier[self] . identifier[summary_data] , identifier[keys] , identifier[config] ) | def mirtrace_rna_categories(self):
""" Generate the miRTrace RNA Categories"""
# Specify the order of the different possible categories
keys = OrderedDict()
keys['reads_mirna'] = {'color': '#33a02c', 'name': 'miRNA'}
keys['reads_rrna'] = {'color': '#ff7f00', 'name': 'rRNA'}
keys['reads_trna'] = {'color': '#1f78b4', 'name': 'tRNA'}
keys['reads_artifact'] = {'color': '#fb9a99', 'name': 'Artifact'}
keys['reads_unknown'] = {'color': '#d9d9d9', 'name': 'Unknown'}
# Config for the plot
config = {'id': 'mirtrace_rna_categories_plot', 'title': 'miRTrace: RNA Categories', 'ylab': '# Reads', 'cpswitch_counts_label': 'Number of Reads'}
return bargraph.plot(self.summary_data, keys, config) |
def filter_by_meta(data, df, join_meta=False, **kwargs):
    """Filter by and join meta columns from an IamDataFrame to a pd.DataFrame

    Parameters
    ----------
    data: pd.DataFrame instance
        DataFrame to which meta columns are to be joined,
        index or columns must include `['model', 'scenario']`
    df: IamDataFrame instance
        IamDataFrame from which meta columns are filtered and joined (optional)
    join_meta: bool, default False
        join selected columns from `df.meta` on `data`
    kwargs:
        meta columns to be filtered/joined, where `col=...` applies filters
        by the given arguments (using `utils.pattern_match()`) and `col=None`
        joins the column without filtering (setting col to `np.nan`
        if `(model, scenario) not in df.meta.index`)
    """
    if not set(META_IDX).issubset(data.index.names + list(data.columns)):
        raise ValueError('missing required index dimensions or columns!')

    meta = pd.DataFrame(df.meta[list(set(kwargs) - set(META_IDX))].copy())

    # filter meta by columns
    keep = np.array([True] * len(meta))
    apply_filter = False
    for col, values in kwargs.items():
        if col in META_IDX and values is not None:
            # 'model' is level 0 of the meta index, 'scenario' is level 1.
            # Use `==` (equality) — the original `is 'model'` relied on
            # CPython string interning and raises a SyntaxWarning.
            _col = meta.index.get_level_values(0 if col == 'model' else 1)
            keep &= pattern_match(_col, values, has_nan=False)
            apply_filter = True
        elif values is not None:
            keep &= pattern_match(meta[col], values)
        apply_filter |= values is not None
    meta = meta[keep]

    # set the data index to META_IDX and apply filtered meta index
    data = data.copy()
    idx = list(data.index.names) if not data.index.names == [None] else None
    data = data.reset_index().set_index(META_IDX)
    meta = meta.loc[meta.index.intersection(data.index)]
    meta.index.names = META_IDX
    if apply_filter:
        data = data.loc[meta.index]
    data.index.names = META_IDX

    # join meta (optional), reset index to format as input arg
    data = data.join(meta) if join_meta else data
    data = data.reset_index().set_index(idx or 'index')
    if idx is None:
        data.index.name = None
    return data
constant[Filter by and join meta columns from an IamDataFrame to a pd.DataFrame
Parameters
----------
data: pd.DataFrame instance
DataFrame to which meta columns are to be joined,
index or columns must include `['model', 'scenario']`
df: IamDataFrame instance
IamDataFrame from which meta columns are filtered and joined (optional)
join_meta: bool, default False
join selected columns from `df.meta` on `data`
kwargs:
meta columns to be filtered/joined, where `col=...` applies filters
by the given arguments (using `utils.pattern_match()`) and `col=None`
joins the column without filtering (setting col to `np.nan`
if `(model, scenario) not in df.meta.index`)
]
if <ast.UnaryOp object at 0x7da2044c0c40> begin[:]
<ast.Raise object at 0x7da2044c2e60>
variable[meta] assign[=] call[name[pd].DataFrame, parameter[call[call[name[df].meta][call[name[list], parameter[binary_operation[call[name[set], parameter[name[kwargs]]] - call[name[set], parameter[name[META_IDX]]]]]]].copy, parameter[]]]]
variable[keep] assign[=] call[name[np].array, parameter[binary_operation[list[[<ast.Constant object at 0x7da207f001f0>]] * call[name[len], parameter[name[meta]]]]]]
variable[apply_filter] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da207f009d0>, <ast.Name object at 0x7da207f01bd0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da207f03850> begin[:]
variable[_col] assign[=] call[name[meta].index.get_level_values, parameter[<ast.IfExp object at 0x7da207f037c0>]]
<ast.AugAssign object at 0x7da207f01810>
variable[apply_filter] assign[=] constant[True]
<ast.AugAssign object at 0x7da1b0d1a680>
variable[meta] assign[=] call[name[meta]][name[keep]]
variable[data] assign[=] call[name[data].copy, parameter[]]
variable[idx] assign[=] <ast.IfExp object at 0x7da2041d9d20>
variable[data] assign[=] call[call[name[data].reset_index, parameter[]].set_index, parameter[name[META_IDX]]]
variable[meta] assign[=] call[name[meta].loc][call[name[meta].index.intersection, parameter[name[data].index]]]
name[meta].index.names assign[=] name[META_IDX]
if name[apply_filter] begin[:]
variable[data] assign[=] call[name[data].loc][name[meta].index]
name[data].index.names assign[=] name[META_IDX]
variable[data] assign[=] <ast.IfExp object at 0x7da2044c04f0>
variable[data] assign[=] call[call[name[data].reset_index, parameter[]].set_index, parameter[<ast.BoolOp object at 0x7da2044c3790>]]
if compare[name[idx] is constant[None]] begin[:]
name[data].index.name assign[=] constant[None]
return[name[data]] | keyword[def] identifier[filter_by_meta] ( identifier[data] , identifier[df] , identifier[join_meta] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[set] ( identifier[META_IDX] ). identifier[issubset] ( identifier[data] . identifier[index] . identifier[names] + identifier[list] ( identifier[data] . identifier[columns] )):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[meta] = identifier[pd] . identifier[DataFrame] ( identifier[df] . identifier[meta] [ identifier[list] ( identifier[set] ( identifier[kwargs] )- identifier[set] ( identifier[META_IDX] ))]. identifier[copy] ())
identifier[keep] = identifier[np] . identifier[array] ([ keyword[True] ]* identifier[len] ( identifier[meta] ))
identifier[apply_filter] = keyword[False]
keyword[for] identifier[col] , identifier[values] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[col] keyword[in] identifier[META_IDX] keyword[and] identifier[values] keyword[is] keyword[not] keyword[None] :
identifier[_col] = identifier[meta] . identifier[index] . identifier[get_level_values] ( literal[int] keyword[if] identifier[col] keyword[is] literal[string] keyword[else] literal[int] )
identifier[keep] &= identifier[pattern_match] ( identifier[_col] , identifier[values] , identifier[has_nan] = keyword[False] )
identifier[apply_filter] = keyword[True]
keyword[elif] identifier[values] keyword[is] keyword[not] keyword[None] :
identifier[keep] &= identifier[pattern_match] ( identifier[meta] [ identifier[col] ], identifier[values] )
identifier[apply_filter] |= identifier[values] keyword[is] keyword[not] keyword[None]
identifier[meta] = identifier[meta] [ identifier[keep] ]
identifier[data] = identifier[data] . identifier[copy] ()
identifier[idx] = identifier[list] ( identifier[data] . identifier[index] . identifier[names] ) keyword[if] keyword[not] identifier[data] . identifier[index] . identifier[names] ==[ keyword[None] ] keyword[else] keyword[None]
identifier[data] = identifier[data] . identifier[reset_index] (). identifier[set_index] ( identifier[META_IDX] )
identifier[meta] = identifier[meta] . identifier[loc] [ identifier[meta] . identifier[index] . identifier[intersection] ( identifier[data] . identifier[index] )]
identifier[meta] . identifier[index] . identifier[names] = identifier[META_IDX]
keyword[if] identifier[apply_filter] :
identifier[data] = identifier[data] . identifier[loc] [ identifier[meta] . identifier[index] ]
identifier[data] . identifier[index] . identifier[names] = identifier[META_IDX]
identifier[data] = identifier[data] . identifier[join] ( identifier[meta] ) keyword[if] identifier[join_meta] keyword[else] identifier[data]
identifier[data] = identifier[data] . identifier[reset_index] (). identifier[set_index] ( identifier[idx] keyword[or] literal[string] )
keyword[if] identifier[idx] keyword[is] keyword[None] :
identifier[data] . identifier[index] . identifier[name] = keyword[None]
keyword[return] identifier[data] | def filter_by_meta(data, df, join_meta=False, **kwargs):
"""Filter by and join meta columns from an IamDataFrame to a pd.DataFrame
Parameters
----------
data: pd.DataFrame instance
DataFrame to which meta columns are to be joined,
index or columns must include `['model', 'scenario']`
df: IamDataFrame instance
IamDataFrame from which meta columns are filtered and joined (optional)
join_meta: bool, default False
join selected columns from `df.meta` on `data`
kwargs:
meta columns to be filtered/joined, where `col=...` applies filters
by the given arguments (using `utils.pattern_match()`) and `col=None`
joins the column without filtering (setting col to `np.nan`
if `(model, scenario) not in df.meta.index`)
"""
if not set(META_IDX).issubset(data.index.names + list(data.columns)):
raise ValueError('missing required index dimensions or columns!') # depends on [control=['if'], data=[]]
meta = pd.DataFrame(df.meta[list(set(kwargs) - set(META_IDX))].copy())
# filter meta by columns
keep = np.array([True] * len(meta))
apply_filter = False
for (col, values) in kwargs.items():
if col in META_IDX and values is not None:
_col = meta.index.get_level_values(0 if col is 'model' else 1)
keep &= pattern_match(_col, values, has_nan=False)
apply_filter = True # depends on [control=['if'], data=[]]
elif values is not None:
keep &= pattern_match(meta[col], values) # depends on [control=['if'], data=['values']]
apply_filter |= values is not None # depends on [control=['for'], data=[]]
meta = meta[keep]
# set the data index to META_IDX and apply filtered meta index
data = data.copy()
idx = list(data.index.names) if not data.index.names == [None] else None
data = data.reset_index().set_index(META_IDX)
meta = meta.loc[meta.index.intersection(data.index)]
meta.index.names = META_IDX
if apply_filter:
data = data.loc[meta.index] # depends on [control=['if'], data=[]]
data.index.names = META_IDX
# join meta (optional), reset index to format as input arg
data = data.join(meta) if join_meta else data
data = data.reset_index().set_index(idx or 'index')
if idx is None:
data.index.name = None # depends on [control=['if'], data=[]]
return data |
def resolution(self, index):
    """Resolution with a given index.

    Parameters
    ----------
    index : int
        Resolution index.
        Global if this is the ``aionationstates.wa`` object, local
        if this is ``aionationstates.ga`` or ``aionationstates.sc``.

    Returns
    -------
    :class:`ApiQuery` of :class:`Resolution`

    Raises
    ------
    :class:`NotFound`
        If a resolution with the requested index doesn't exist.
    """
    @api_query('resolution', id=str(index))
    async def result(_, root):
        resolution_elem = root.find('RESOLUTION')
        # Deliberate truthiness check (not `is None`): mirrors the
        # ElementTree semantics the original relied on.
        if not resolution_elem:
            raise NotFound(f'No resolution found with index {index}')
        return Resolution(resolution_elem)
    return result(self)
constant[Resolution with a given index.
Parameters
----------
index : int
Resolution index.
Global if this is the ``aionationstates.wa`` object, local
if this is ``aionationstates.ga`` or ``aionationstates.sc``.
Returns
-------
:class:`ApiQuery` of :class:`Resolution`
Raises
------
:class:`NotFound`
If a resolution with the requested index doesn't exist.
]
<ast.AsyncFunctionDef object at 0x7da207f981c0>
return[call[name[result], parameter[name[self]]]] | keyword[def] identifier[resolution] ( identifier[self] , identifier[index] ):
literal[string]
@ identifier[api_query] ( literal[string] , identifier[id] = identifier[str] ( identifier[index] ))
keyword[async] keyword[def] identifier[result] ( identifier[_] , identifier[root] ):
identifier[elem] = identifier[root] . identifier[find] ( literal[string] )
keyword[if] keyword[not] identifier[elem] :
keyword[raise] identifier[NotFound] ( literal[string] )
keyword[return] identifier[Resolution] ( identifier[elem] )
keyword[return] identifier[result] ( identifier[self] ) | def resolution(self, index):
"""Resolution with a given index.
Parameters
----------
index : int
Resolution index.
Global if this is the ``aionationstates.wa`` object, local
if this is ``aionationstates.ga`` or ``aionationstates.sc``.
Returns
-------
:class:`ApiQuery` of :class:`Resolution`
Raises
------
:class:`NotFound`
If a resolution with the requested index doesn't exist.
"""
@api_query('resolution', id=str(index))
async def result(_, root):
elem = root.find('RESOLUTION')
if not elem:
raise NotFound(f'No resolution found with index {index}') # depends on [control=['if'], data=[]]
return Resolution(elem)
return result(self) |
def read_xml(cls, url, features, timestamp, game_number):
    """
    read xml object
    :param url: contents url
    :param features: markup provider
    :param timestamp: game day
    :param game_number: game number
    :return: pitchpx.game.game.Game object
    """
    # The game feed lives at <base url> + the fixed per-class file name.
    xml_url = url + cls.FILENAME
    soup = MlbamUtil.find_xml(xml_url, features)
    return cls._generate_game_object(soup, timestamp, game_number)
constant[
read xml object
:param url: contents url
:param features: markup provider
:param timestamp: game day
:param game_number: game number
:return: pitchpx.game.game.Game object
]
variable[soup] assign[=] call[name[MlbamUtil].find_xml, parameter[call[constant[].join, parameter[list[[<ast.Name object at 0x7da2044c1cc0>, <ast.Attribute object at 0x7da2044c3340>]]]], name[features]]]
return[call[name[cls]._generate_game_object, parameter[name[soup], name[timestamp], name[game_number]]]] | keyword[def] identifier[read_xml] ( identifier[cls] , identifier[url] , identifier[features] , identifier[timestamp] , identifier[game_number] ):
literal[string]
identifier[soup] = identifier[MlbamUtil] . identifier[find_xml] ( literal[string] . identifier[join] ([ identifier[url] , identifier[cls] . identifier[FILENAME] ]), identifier[features] )
keyword[return] identifier[cls] . identifier[_generate_game_object] ( identifier[soup] , identifier[timestamp] , identifier[game_number] ) | def read_xml(cls, url, features, timestamp, game_number):
"""
read xml object
:param url: contents url
:param features: markup provider
:param timestamp: game day
:param game_number: game number
:return: pitchpx.game.game.Game object
"""
soup = MlbamUtil.find_xml(''.join([url, cls.FILENAME]), features)
return cls._generate_game_object(soup, timestamp, game_number) |
def copy_bootstrap(bootstrap_target: Path) -> None:
    """Copy bootstrap code from shiv into the pyz.

    This function is excluded from type checking due to the conditional import.

    :param bootstrap_target: The temporary directory where we are staging pyz contents.
    """
    for resource_name in importlib_resources.contents(bootstrap):
        if not importlib_resources.is_resource(bootstrap, resource_name):
            # Skip anything that is not a plain resource file (e.g. sub-packages).
            continue
        with importlib_resources.path(bootstrap, resource_name) as resource_path:
            shutil.copyfile(resource_path.absolute(),
                            bootstrap_target / resource_path.name)
constant[Copy bootstrap code from shiv into the pyz.
This function is excluded from type checking due to the conditional import.
:param bootstrap_target: The temporary directory where we are staging pyz contents.
]
for taget[name[bootstrap_file]] in starred[call[name[importlib_resources].contents, parameter[name[bootstrap]]]] begin[:]
if call[name[importlib_resources].is_resource, parameter[name[bootstrap], name[bootstrap_file]]] begin[:]
with call[name[importlib_resources].path, parameter[name[bootstrap], name[bootstrap_file]]] begin[:]
call[name[shutil].copyfile, parameter[call[name[f].absolute, parameter[]], binary_operation[name[bootstrap_target] / name[f].name]]] | keyword[def] identifier[copy_bootstrap] ( identifier[bootstrap_target] : identifier[Path] )-> keyword[None] :
literal[string]
keyword[for] identifier[bootstrap_file] keyword[in] identifier[importlib_resources] . identifier[contents] ( identifier[bootstrap] ):
keyword[if] identifier[importlib_resources] . identifier[is_resource] ( identifier[bootstrap] , identifier[bootstrap_file] ):
keyword[with] identifier[importlib_resources] . identifier[path] ( identifier[bootstrap] , identifier[bootstrap_file] ) keyword[as] identifier[f] :
identifier[shutil] . identifier[copyfile] ( identifier[f] . identifier[absolute] (), identifier[bootstrap_target] / identifier[f] . identifier[name] ) | def copy_bootstrap(bootstrap_target: Path) -> None:
"""Copy bootstrap code from shiv into the pyz.
This function is excluded from type checking due to the conditional import.
:param bootstrap_target: The temporary directory where we are staging pyz contents.
"""
for bootstrap_file in importlib_resources.contents(bootstrap):
if importlib_resources.is_resource(bootstrap, bootstrap_file):
with importlib_resources.path(bootstrap, bootstrap_file) as f:
shutil.copyfile(f.absolute(), bootstrap_target / f.name) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['bootstrap_file']] |
def replace_all_post_order(expression: Expression, rules: Iterable[ReplacementRule]) \
        -> Union[Expression, Sequence[Expression]]:
    """Replace all occurrences of the patterns according to the replacement rules.

    A replacement rule consists of a *pattern*, that is matched against any subexpression
    of the expression. If a match is found, the *replacement* callback of the rule is called with
    the variables from the match substitution. Whatever the callback returns is used as a replacement for the
    matched subexpression. This can either be a single expression or a sequence of expressions, which is then
    integrated into the surrounding operation in place of the subexpression.

    Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions
    will be matched.

    Args:
        expression:
            The expression to which the replacement rules are applied.
        rules:
            A collection of replacement rules that are applied to the expression.

    Returns:
        The resulting expression after the application of the replacement rules. This can also be a sequence of
        expressions, if the root expression is replaced with a sequence of expressions by a rule.
    """
    # NOTE: the original docstring documented a `max_count` argument that is
    # not part of this function's signature; it has been removed here.
    # _replace_all_post_order returns a tuple; only element 0 (the rewritten
    # expression) is part of the public contract — the rest is internal
    # bookkeeping (presumably a "was anything replaced" flag; confirm in the
    # helper if it matters).
    return _replace_all_post_order(expression, rules)[0]
constant[Replace all occurrences of the patterns according to the replacement rules.
A replacement rule consists of a *pattern*, that is matched against any subexpression
of the expression. If a match is found, the *replacement* callback of the rule is called with
the variables from the match substitution. Whatever the callback returns is used as a replacement for the
matched subexpression. This can either be a single expression or a sequence of expressions, which is then
integrated into the surrounding operation in place of the subexpression.
Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions
will be matched.
Args:
expression:
The expression to which the replacement rules are applied.
rules:
A collection of replacement rules that are applied to the expression.
max_count:
If given, at most *max_count* applications of the rules are performed. Otherwise, the rules
are applied until there is no more match. If the set of replacement rules is not confluent,
the replacement might not terminate without a *max_count* set.
Returns:
The resulting expression after the application of the replacement rules. This can also be a sequence of
expressions, if the root expression is replaced with a sequence of expressions by a rule.
]
return[call[call[name[_replace_all_post_order], parameter[name[expression], name[rules]]]][constant[0]]] | keyword[def] identifier[replace_all_post_order] ( identifier[expression] : identifier[Expression] , identifier[rules] : identifier[Iterable] [ identifier[ReplacementRule] ])-> identifier[Union] [ identifier[Expression] , identifier[Sequence] [ identifier[Expression] ]]:
literal[string]
keyword[return] identifier[_replace_all_post_order] ( identifier[expression] , identifier[rules] )[ literal[int] ] | def replace_all_post_order(expression: Expression, rules: Iterable[ReplacementRule]) -> Union[Expression, Sequence[Expression]]:
"""Replace all occurrences of the patterns according to the replacement rules.
A replacement rule consists of a *pattern*, that is matched against any subexpression
of the expression. If a match is found, the *replacement* callback of the rule is called with
the variables from the match substitution. Whatever the callback returns is used as a replacement for the
matched subexpression. This can either be a single expression or a sequence of expressions, which is then
integrated into the surrounding operation in place of the subexpression.
Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions
will be matched.
Args:
expression:
The expression to which the replacement rules are applied.
rules:
A collection of replacement rules that are applied to the expression.
max_count:
If given, at most *max_count* applications of the rules are performed. Otherwise, the rules
are applied until there is no more match. If the set of replacement rules is not confluent,
the replacement might not terminate without a *max_count* set.
Returns:
The resulting expression after the application of the replacement rules. This can also be a sequence of
expressions, if the root expression is replaced with a sequence of expressions by a rule.
"""
return _replace_all_post_order(expression, rules)[0] |
def zremrangebyrank(self, key, start, stop):
"""Remove all members in a sorted set within the given indexes.
:raises TypeError: if start is not int
:raises TypeError: if stop is not int
"""
if not isinstance(start, int):
raise TypeError("start argument must be int")
if not isinstance(stop, int):
raise TypeError("stop argument must be int")
return self.execute(b'ZREMRANGEBYRANK', key, start, stop) | def function[zremrangebyrank, parameter[self, key, start, stop]]:
constant[Remove all members in a sorted set within the given indexes.
:raises TypeError: if start is not int
:raises TypeError: if stop is not int
]
if <ast.UnaryOp object at 0x7da1b235b550> begin[:]
<ast.Raise object at 0x7da1b2359a80>
if <ast.UnaryOp object at 0x7da1b235b610> begin[:]
<ast.Raise object at 0x7da2054a6890>
return[call[name[self].execute, parameter[constant[b'ZREMRANGEBYRANK'], name[key], name[start], name[stop]]]] | keyword[def] identifier[zremrangebyrank] ( identifier[self] , identifier[key] , identifier[start] , identifier[stop] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[start] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[stop] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] identifier[self] . identifier[execute] ( literal[string] , identifier[key] , identifier[start] , identifier[stop] ) | def zremrangebyrank(self, key, start, stop):
"""Remove all members in a sorted set within the given indexes.
:raises TypeError: if start is not int
:raises TypeError: if stop is not int
"""
if not isinstance(start, int):
raise TypeError('start argument must be int') # depends on [control=['if'], data=[]]
if not isinstance(stop, int):
raise TypeError('stop argument must be int') # depends on [control=['if'], data=[]]
return self.execute(b'ZREMRANGEBYRANK', key, start, stop) |
def create_runscript(self, default="/bin/bash", force=False):
'''create_entrypoint is intended to create a singularity runscript
based on a Docker entrypoint or command. We first use the Docker
ENTRYPOINT, if defined. If not, we use the CMD. If neither is found,
we use function default.
Parameters
==========
default: set a default entrypoint, if the container does not have
an entrypoint or cmd.
force: If true, use default and ignore Dockerfile settings
'''
entrypoint = default
# Only look at Docker if not enforcing default
if force is False:
if self.entrypoint is not None:
entrypoint = ''.join(self.entrypoint)
elif self.cmd is not None:
entrypoint = ''.join(self.cmd)
# Entrypoint should use exec
if not entrypoint.startswith('exec'):
entrypoint = "exec %s" %entrypoint
# Should take input arguments into account
if not re.search('"?[$]@"?', entrypoint):
entrypoint = '%s "$@"' %entrypoint
return entrypoint | def function[create_runscript, parameter[self, default, force]]:
constant[create_entrypoint is intended to create a singularity runscript
based on a Docker entrypoint or command. We first use the Docker
ENTRYPOINT, if defined. If not, we use the CMD. If neither is found,
we use function default.
Parameters
==========
default: set a default entrypoint, if the container does not have
an entrypoint or cmd.
force: If true, use default and ignore Dockerfile settings
]
variable[entrypoint] assign[=] name[default]
if compare[name[force] is constant[False]] begin[:]
if compare[name[self].entrypoint is_not constant[None]] begin[:]
variable[entrypoint] assign[=] call[constant[].join, parameter[name[self].entrypoint]]
if <ast.UnaryOp object at 0x7da1b040dfc0> begin[:]
variable[entrypoint] assign[=] binary_operation[constant[exec %s] <ast.Mod object at 0x7da2590d6920> name[entrypoint]]
if <ast.UnaryOp object at 0x7da1b040dd80> begin[:]
variable[entrypoint] assign[=] binary_operation[constant[%s "$@"] <ast.Mod object at 0x7da2590d6920> name[entrypoint]]
return[name[entrypoint]] | keyword[def] identifier[create_runscript] ( identifier[self] , identifier[default] = literal[string] , identifier[force] = keyword[False] ):
literal[string]
identifier[entrypoint] = identifier[default]
keyword[if] identifier[force] keyword[is] keyword[False] :
keyword[if] identifier[self] . identifier[entrypoint] keyword[is] keyword[not] keyword[None] :
identifier[entrypoint] = literal[string] . identifier[join] ( identifier[self] . identifier[entrypoint] )
keyword[elif] identifier[self] . identifier[cmd] keyword[is] keyword[not] keyword[None] :
identifier[entrypoint] = literal[string] . identifier[join] ( identifier[self] . identifier[cmd] )
keyword[if] keyword[not] identifier[entrypoint] . identifier[startswith] ( literal[string] ):
identifier[entrypoint] = literal[string] % identifier[entrypoint]
keyword[if] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[entrypoint] ):
identifier[entrypoint] = literal[string] % identifier[entrypoint]
keyword[return] identifier[entrypoint] | def create_runscript(self, default='/bin/bash', force=False):
"""create_entrypoint is intended to create a singularity runscript
based on a Docker entrypoint or command. We first use the Docker
ENTRYPOINT, if defined. If not, we use the CMD. If neither is found,
we use function default.
Parameters
==========
default: set a default entrypoint, if the container does not have
an entrypoint or cmd.
force: If true, use default and ignore Dockerfile settings
"""
entrypoint = default
# Only look at Docker if not enforcing default
if force is False:
if self.entrypoint is not None:
entrypoint = ''.join(self.entrypoint) # depends on [control=['if'], data=[]]
elif self.cmd is not None:
entrypoint = ''.join(self.cmd) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Entrypoint should use exec
if not entrypoint.startswith('exec'):
entrypoint = 'exec %s' % entrypoint # depends on [control=['if'], data=[]]
# Should take input arguments into account
if not re.search('"?[$]@"?', entrypoint):
entrypoint = '%s "$@"' % entrypoint # depends on [control=['if'], data=[]]
return entrypoint |
def get_substances(identifier, namespace='sid', as_dataframe=False, **kwargs):
"""Retrieve the specified substance records from PubChem.
:param identifier: The substance identifier to use as a search query.
:param namespace: (optional) The identifier type, one of sid, name or sourceid/<source name>.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Substance` properties into a pandas
:class:`~pandas.DataFrame` and return that.
"""
results = get_json(identifier, namespace, 'substance', **kwargs)
substances = [Substance(r) for r in results['PC_Substances']] if results else []
if as_dataframe:
return substances_to_frame(substances)
return substances | def function[get_substances, parameter[identifier, namespace, as_dataframe]]:
constant[Retrieve the specified substance records from PubChem.
:param identifier: The substance identifier to use as a search query.
:param namespace: (optional) The identifier type, one of sid, name or sourceid/<source name>.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Substance` properties into a pandas
:class:`~pandas.DataFrame` and return that.
]
variable[results] assign[=] call[name[get_json], parameter[name[identifier], name[namespace], constant[substance]]]
variable[substances] assign[=] <ast.IfExp object at 0x7da1b0cf5f30>
if name[as_dataframe] begin[:]
return[call[name[substances_to_frame], parameter[name[substances]]]]
return[name[substances]] | keyword[def] identifier[get_substances] ( identifier[identifier] , identifier[namespace] = literal[string] , identifier[as_dataframe] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[results] = identifier[get_json] ( identifier[identifier] , identifier[namespace] , literal[string] ,** identifier[kwargs] )
identifier[substances] =[ identifier[Substance] ( identifier[r] ) keyword[for] identifier[r] keyword[in] identifier[results] [ literal[string] ]] keyword[if] identifier[results] keyword[else] []
keyword[if] identifier[as_dataframe] :
keyword[return] identifier[substances_to_frame] ( identifier[substances] )
keyword[return] identifier[substances] | def get_substances(identifier, namespace='sid', as_dataframe=False, **kwargs):
"""Retrieve the specified substance records from PubChem.
:param identifier: The substance identifier to use as a search query.
:param namespace: (optional) The identifier type, one of sid, name or sourceid/<source name>.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Substance` properties into a pandas
:class:`~pandas.DataFrame` and return that.
"""
results = get_json(identifier, namespace, 'substance', **kwargs)
substances = [Substance(r) for r in results['PC_Substances']] if results else []
if as_dataframe:
return substances_to_frame(substances) # depends on [control=['if'], data=[]]
return substances |
def get_request_url(environ):
# type: (Dict[str, str]) -> str
"""Return the absolute URL without query string for the given WSGI
environment."""
return "%s://%s/%s" % (
environ.get("wsgi.url_scheme"),
get_host(environ),
wsgi_decoding_dance(environ.get("PATH_INFO") or "").lstrip("/"),
) | def function[get_request_url, parameter[environ]]:
constant[Return the absolute URL without query string for the given WSGI
environment.]
return[binary_operation[constant[%s://%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b19cebf0>, <ast.Call object at 0x7da1b19cce20>, <ast.Call object at 0x7da1b19cc3a0>]]]] | keyword[def] identifier[get_request_url] ( identifier[environ] ):
literal[string]
keyword[return] literal[string] %(
identifier[environ] . identifier[get] ( literal[string] ),
identifier[get_host] ( identifier[environ] ),
identifier[wsgi_decoding_dance] ( identifier[environ] . identifier[get] ( literal[string] ) keyword[or] literal[string] ). identifier[lstrip] ( literal[string] ),
) | def get_request_url(environ):
# type: (Dict[str, str]) -> str
'Return the absolute URL without query string for the given WSGI\n environment.'
return '%s://%s/%s' % (environ.get('wsgi.url_scheme'), get_host(environ), wsgi_decoding_dance(environ.get('PATH_INFO') or '').lstrip('/')) |
def is_touched(self, position):
"""Hit detection method.
Indicates if this key has been hit by a touch / click event at the given position.
:param position: Event position.
:returns: True is the given position collide this key, False otherwise.
"""
return position[0] >= self.position[0] and position[0] <= self.position[0]+ self.size[0] | def function[is_touched, parameter[self, position]]:
constant[Hit detection method.
Indicates if this key has been hit by a touch / click event at the given position.
:param position: Event position.
:returns: True is the given position collide this key, False otherwise.
]
return[<ast.BoolOp object at 0x7da20c7c8eb0>] | keyword[def] identifier[is_touched] ( identifier[self] , identifier[position] ):
literal[string]
keyword[return] identifier[position] [ literal[int] ]>= identifier[self] . identifier[position] [ literal[int] ] keyword[and] identifier[position] [ literal[int] ]<= identifier[self] . identifier[position] [ literal[int] ]+ identifier[self] . identifier[size] [ literal[int] ] | def is_touched(self, position):
"""Hit detection method.
Indicates if this key has been hit by a touch / click event at the given position.
:param position: Event position.
:returns: True is the given position collide this key, False otherwise.
"""
return position[0] >= self.position[0] and position[0] <= self.position[0] + self.size[0] |
def as_dict(self):
"""
Returns the model as a dict
"""
if not self._is_valid:
self.validate()
from .converters import to_dict
return to_dict(self) | def function[as_dict, parameter[self]]:
constant[
Returns the model as a dict
]
if <ast.UnaryOp object at 0x7da1b04c9240> begin[:]
call[name[self].validate, parameter[]]
from relative_module[converters] import module[to_dict]
return[call[name[to_dict], parameter[name[self]]]] | keyword[def] identifier[as_dict] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_valid] :
identifier[self] . identifier[validate] ()
keyword[from] . identifier[converters] keyword[import] identifier[to_dict]
keyword[return] identifier[to_dict] ( identifier[self] ) | def as_dict(self):
"""
Returns the model as a dict
"""
if not self._is_valid:
self.validate() # depends on [control=['if'], data=[]]
from .converters import to_dict
return to_dict(self) |
def get_clusters_from_fasta_filepath(
fasta_filepath,
original_fasta_path,
percent_ID=0.97,
max_accepts=1,
max_rejects=8,
stepwords=8,
word_length=8,
optimal=False,
exact=False,
suppress_sort=False,
output_dir=None,
enable_rev_strand_matching=False,
subject_fasta_filepath=None,
suppress_new_clusters=False,
return_cluster_maps=False,
stable_sort=False,
tmp_dir=gettempdir(),
save_uc_files=True,
HALT_EXEC=False):
""" Main convenience wrapper for using uclust to generate cluster files
A source fasta file is required for the fasta_filepath. This will be
sorted to be in order of longest to shortest length sequences. Following
this, the sorted fasta file is used to generate a cluster file in the
uclust (.uc) format. Next the .uc file is converted to cd-hit format
(.clstr). Finally this file is parsed and returned as a list of lists,
where each sublist a cluster of sequences. If an output_dir is
specified, the intermediate files will be preserved, otherwise all
files created are temporary and will be deleted at the end of this
function
The percent_ID parameter specifies the percent identity for a clusters,
i.e., if 99% were the parameter, all sequences that were 99% identical
would be grouped as a cluster.
"""
# Create readable intermediate filenames if they are to be kept
fasta_output_filepath = None
uc_output_filepath = None
cd_hit_filepath = None
if output_dir and not output_dir.endswith('/'):
output_dir += '/'
if save_uc_files:
uc_save_filepath = get_output_filepaths(
output_dir,
original_fasta_path)
else:
uc_save_filepath = None
sorted_fasta_filepath = ""
uc_filepath = ""
clstr_filepath = ""
# Error check in case any app controller fails
files_to_remove = []
try:
if not suppress_sort:
# Sort fasta input file from largest to smallest sequence
sort_fasta = uclust_fasta_sort_from_filepath(fasta_filepath,
output_filepath=fasta_output_filepath)
# Get sorted fasta name from application wrapper
sorted_fasta_filepath = sort_fasta['Output'].name
files_to_remove.append(sorted_fasta_filepath)
else:
sort_fasta = None
sorted_fasta_filepath = fasta_filepath
# Generate uclust cluster file (.uc format)
uclust_cluster = uclust_cluster_from_sorted_fasta_filepath(
sorted_fasta_filepath,
uc_save_filepath,
percent_ID=percent_ID,
max_accepts=max_accepts,
max_rejects=max_rejects,
stepwords=stepwords,
word_length=word_length,
optimal=optimal,
exact=exact,
suppress_sort=suppress_sort,
enable_rev_strand_matching=enable_rev_strand_matching,
subject_fasta_filepath=subject_fasta_filepath,
suppress_new_clusters=suppress_new_clusters,
stable_sort=stable_sort,
tmp_dir=tmp_dir,
HALT_EXEC=HALT_EXEC)
# Get cluster file name from application wrapper
remove_files(files_to_remove)
except ApplicationError:
remove_files(files_to_remove)
raise ApplicationError('Error running uclust. Possible causes are '
'unsupported version (current supported version is v1.2.22) is installed or '
'improperly formatted input file was provided')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('uclust not found, is it properly ' +
'installed?')
# Get list of lists for each cluster
clusters, failures, seeds = \
clusters_from_uc_file(uclust_cluster['ClusterFile'])
# Remove temp files unless user specifies output filepath
if not save_uc_files:
uclust_cluster.cleanUp()
if return_cluster_maps:
return clusters, failures, seeds
else:
return clusters.values(), failures, seeds | def function[get_clusters_from_fasta_filepath, parameter[fasta_filepath, original_fasta_path, percent_ID, max_accepts, max_rejects, stepwords, word_length, optimal, exact, suppress_sort, output_dir, enable_rev_strand_matching, subject_fasta_filepath, suppress_new_clusters, return_cluster_maps, stable_sort, tmp_dir, save_uc_files, HALT_EXEC]]:
constant[ Main convenience wrapper for using uclust to generate cluster files
A source fasta file is required for the fasta_filepath. This will be
sorted to be in order of longest to shortest length sequences. Following
this, the sorted fasta file is used to generate a cluster file in the
uclust (.uc) format. Next the .uc file is converted to cd-hit format
(.clstr). Finally this file is parsed and returned as a list of lists,
where each sublist a cluster of sequences. If an output_dir is
specified, the intermediate files will be preserved, otherwise all
files created are temporary and will be deleted at the end of this
function
The percent_ID parameter specifies the percent identity for a clusters,
i.e., if 99% were the parameter, all sequences that were 99% identical
would be grouped as a cluster.
]
variable[fasta_output_filepath] assign[=] constant[None]
variable[uc_output_filepath] assign[=] constant[None]
variable[cd_hit_filepath] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b0b728c0> begin[:]
<ast.AugAssign object at 0x7da1b0b71600>
if name[save_uc_files] begin[:]
variable[uc_save_filepath] assign[=] call[name[get_output_filepaths], parameter[name[output_dir], name[original_fasta_path]]]
variable[sorted_fasta_filepath] assign[=] constant[]
variable[uc_filepath] assign[=] constant[]
variable[clstr_filepath] assign[=] constant[]
variable[files_to_remove] assign[=] list[[]]
<ast.Try object at 0x7da1b0b71bd0>
<ast.Tuple object at 0x7da1b0b82e60> assign[=] call[name[clusters_from_uc_file], parameter[call[name[uclust_cluster]][constant[ClusterFile]]]]
if <ast.UnaryOp object at 0x7da1b0b81900> begin[:]
call[name[uclust_cluster].cleanUp, parameter[]]
if name[return_cluster_maps] begin[:]
return[tuple[[<ast.Name object at 0x7da1b0b807f0>, <ast.Name object at 0x7da1b0b81870>, <ast.Name object at 0x7da1b0b80250>]]] | keyword[def] identifier[get_clusters_from_fasta_filepath] (
identifier[fasta_filepath] ,
identifier[original_fasta_path] ,
identifier[percent_ID] = literal[int] ,
identifier[max_accepts] = literal[int] ,
identifier[max_rejects] = literal[int] ,
identifier[stepwords] = literal[int] ,
identifier[word_length] = literal[int] ,
identifier[optimal] = keyword[False] ,
identifier[exact] = keyword[False] ,
identifier[suppress_sort] = keyword[False] ,
identifier[output_dir] = keyword[None] ,
identifier[enable_rev_strand_matching] = keyword[False] ,
identifier[subject_fasta_filepath] = keyword[None] ,
identifier[suppress_new_clusters] = keyword[False] ,
identifier[return_cluster_maps] = keyword[False] ,
identifier[stable_sort] = keyword[False] ,
identifier[tmp_dir] = identifier[gettempdir] (),
identifier[save_uc_files] = keyword[True] ,
identifier[HALT_EXEC] = keyword[False] ):
literal[string]
identifier[fasta_output_filepath] = keyword[None]
identifier[uc_output_filepath] = keyword[None]
identifier[cd_hit_filepath] = keyword[None]
keyword[if] identifier[output_dir] keyword[and] keyword[not] identifier[output_dir] . identifier[endswith] ( literal[string] ):
identifier[output_dir] += literal[string]
keyword[if] identifier[save_uc_files] :
identifier[uc_save_filepath] = identifier[get_output_filepaths] (
identifier[output_dir] ,
identifier[original_fasta_path] )
keyword[else] :
identifier[uc_save_filepath] = keyword[None]
identifier[sorted_fasta_filepath] = literal[string]
identifier[uc_filepath] = literal[string]
identifier[clstr_filepath] = literal[string]
identifier[files_to_remove] =[]
keyword[try] :
keyword[if] keyword[not] identifier[suppress_sort] :
identifier[sort_fasta] = identifier[uclust_fasta_sort_from_filepath] ( identifier[fasta_filepath] ,
identifier[output_filepath] = identifier[fasta_output_filepath] )
identifier[sorted_fasta_filepath] = identifier[sort_fasta] [ literal[string] ]. identifier[name]
identifier[files_to_remove] . identifier[append] ( identifier[sorted_fasta_filepath] )
keyword[else] :
identifier[sort_fasta] = keyword[None]
identifier[sorted_fasta_filepath] = identifier[fasta_filepath]
identifier[uclust_cluster] = identifier[uclust_cluster_from_sorted_fasta_filepath] (
identifier[sorted_fasta_filepath] ,
identifier[uc_save_filepath] ,
identifier[percent_ID] = identifier[percent_ID] ,
identifier[max_accepts] = identifier[max_accepts] ,
identifier[max_rejects] = identifier[max_rejects] ,
identifier[stepwords] = identifier[stepwords] ,
identifier[word_length] = identifier[word_length] ,
identifier[optimal] = identifier[optimal] ,
identifier[exact] = identifier[exact] ,
identifier[suppress_sort] = identifier[suppress_sort] ,
identifier[enable_rev_strand_matching] = identifier[enable_rev_strand_matching] ,
identifier[subject_fasta_filepath] = identifier[subject_fasta_filepath] ,
identifier[suppress_new_clusters] = identifier[suppress_new_clusters] ,
identifier[stable_sort] = identifier[stable_sort] ,
identifier[tmp_dir] = identifier[tmp_dir] ,
identifier[HALT_EXEC] = identifier[HALT_EXEC] )
identifier[remove_files] ( identifier[files_to_remove] )
keyword[except] identifier[ApplicationError] :
identifier[remove_files] ( identifier[files_to_remove] )
keyword[raise] identifier[ApplicationError] ( literal[string]
literal[string]
literal[string] )
keyword[except] identifier[ApplicationNotFoundError] :
identifier[remove_files] ( identifier[files_to_remove] )
keyword[raise] identifier[ApplicationNotFoundError] ( literal[string] +
literal[string] )
identifier[clusters] , identifier[failures] , identifier[seeds] = identifier[clusters_from_uc_file] ( identifier[uclust_cluster] [ literal[string] ])
keyword[if] keyword[not] identifier[save_uc_files] :
identifier[uclust_cluster] . identifier[cleanUp] ()
keyword[if] identifier[return_cluster_maps] :
keyword[return] identifier[clusters] , identifier[failures] , identifier[seeds]
keyword[else] :
keyword[return] identifier[clusters] . identifier[values] (), identifier[failures] , identifier[seeds] | def get_clusters_from_fasta_filepath(fasta_filepath, original_fasta_path, percent_ID=0.97, max_accepts=1, max_rejects=8, stepwords=8, word_length=8, optimal=False, exact=False, suppress_sort=False, output_dir=None, enable_rev_strand_matching=False, subject_fasta_filepath=None, suppress_new_clusters=False, return_cluster_maps=False, stable_sort=False, tmp_dir=gettempdir(), save_uc_files=True, HALT_EXEC=False):
""" Main convenience wrapper for using uclust to generate cluster files
A source fasta file is required for the fasta_filepath. This will be
sorted to be in order of longest to shortest length sequences. Following
this, the sorted fasta file is used to generate a cluster file in the
uclust (.uc) format. Next the .uc file is converted to cd-hit format
(.clstr). Finally this file is parsed and returned as a list of lists,
where each sublist a cluster of sequences. If an output_dir is
specified, the intermediate files will be preserved, otherwise all
files created are temporary and will be deleted at the end of this
function
The percent_ID parameter specifies the percent identity for a clusters,
i.e., if 99% were the parameter, all sequences that were 99% identical
would be grouped as a cluster.
"""
# Create readable intermediate filenames if they are to be kept
fasta_output_filepath = None
uc_output_filepath = None
cd_hit_filepath = None
if output_dir and (not output_dir.endswith('/')):
output_dir += '/' # depends on [control=['if'], data=[]]
if save_uc_files:
uc_save_filepath = get_output_filepaths(output_dir, original_fasta_path) # depends on [control=['if'], data=[]]
else:
uc_save_filepath = None
sorted_fasta_filepath = ''
uc_filepath = ''
clstr_filepath = ''
# Error check in case any app controller fails
files_to_remove = []
try:
if not suppress_sort:
# Sort fasta input file from largest to smallest sequence
sort_fasta = uclust_fasta_sort_from_filepath(fasta_filepath, output_filepath=fasta_output_filepath)
# Get sorted fasta name from application wrapper
sorted_fasta_filepath = sort_fasta['Output'].name
files_to_remove.append(sorted_fasta_filepath) # depends on [control=['if'], data=[]]
else:
sort_fasta = None
sorted_fasta_filepath = fasta_filepath
# Generate uclust cluster file (.uc format)
uclust_cluster = uclust_cluster_from_sorted_fasta_filepath(sorted_fasta_filepath, uc_save_filepath, percent_ID=percent_ID, max_accepts=max_accepts, max_rejects=max_rejects, stepwords=stepwords, word_length=word_length, optimal=optimal, exact=exact, suppress_sort=suppress_sort, enable_rev_strand_matching=enable_rev_strand_matching, subject_fasta_filepath=subject_fasta_filepath, suppress_new_clusters=suppress_new_clusters, stable_sort=stable_sort, tmp_dir=tmp_dir, HALT_EXEC=HALT_EXEC)
# Get cluster file name from application wrapper
remove_files(files_to_remove) # depends on [control=['try'], data=[]]
except ApplicationError:
remove_files(files_to_remove)
raise ApplicationError('Error running uclust. Possible causes are unsupported version (current supported version is v1.2.22) is installed or improperly formatted input file was provided') # depends on [control=['except'], data=[]]
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('uclust not found, is it properly ' + 'installed?') # depends on [control=['except'], data=[]]
# Get list of lists for each cluster
(clusters, failures, seeds) = clusters_from_uc_file(uclust_cluster['ClusterFile'])
# Remove temp files unless user specifies output filepath
if not save_uc_files:
uclust_cluster.cleanUp() # depends on [control=['if'], data=[]]
if return_cluster_maps:
return (clusters, failures, seeds) # depends on [control=['if'], data=[]]
else:
return (clusters.values(), failures, seeds) |
def get_authorizations(self):
"""
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
"""
return github.PaginatedList.PaginatedList(
github.Authorization.Authorization,
self._requester,
"/authorizations",
None
) | def function[get_authorizations, parameter[self]]:
constant[
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
]
return[call[name[github].PaginatedList.PaginatedList, parameter[name[github].Authorization.Authorization, name[self]._requester, constant[/authorizations], constant[None]]]] | keyword[def] identifier[get_authorizations] ( identifier[self] ):
literal[string]
keyword[return] identifier[github] . identifier[PaginatedList] . identifier[PaginatedList] (
identifier[github] . identifier[Authorization] . identifier[Authorization] ,
identifier[self] . identifier[_requester] ,
literal[string] ,
keyword[None]
) | def get_authorizations(self):
"""
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
"""
return github.PaginatedList.PaginatedList(github.Authorization.Authorization, self._requester, '/authorizations', None) |
def report( self, params, meta, res ):
"""Return a properly-structured dict of results. The default returns a dict with
results keyed by :attr:`Experiment.RESULTS`, the data point in the parameter space
keyed by :attr:`Experiment.PARAMETERS`, and timing and other metadata keyed
by :attr:`Experiment.METADATA`. Overriding this method can be used to record extra
values, but be sure to call the base method as well.
:param params: the parameters we ran under
:param meta: the metadata for this run
:param res: the direct experimental results from do()
:returns: a :term:`results dict`"""
rc = dict()
rc[self.PARAMETERS] = params.copy()
rc[self.METADATA] = meta.copy()
rc[self.RESULTS] = res
return rc | def function[report, parameter[self, params, meta, res]]:
constant[Return a properly-structured dict of results. The default returns a dict with
results keyed by :attr:`Experiment.RESULTS`, the data point in the parameter space
keyed by :attr:`Experiment.PARAMETERS`, and timing and other metadata keyed
by :attr:`Experiment.METADATA`. Overriding this method can be used to record extra
values, but be sure to call the base method as well.
:param params: the parameters we ran under
:param meta: the metadata for this run
:param res: the direct experimental results from do()
:returns: a :term:`results dict`]
variable[rc] assign[=] call[name[dict], parameter[]]
call[name[rc]][name[self].PARAMETERS] assign[=] call[name[params].copy, parameter[]]
call[name[rc]][name[self].METADATA] assign[=] call[name[meta].copy, parameter[]]
call[name[rc]][name[self].RESULTS] assign[=] name[res]
return[name[rc]] | keyword[def] identifier[report] ( identifier[self] , identifier[params] , identifier[meta] , identifier[res] ):
literal[string]
identifier[rc] = identifier[dict] ()
identifier[rc] [ identifier[self] . identifier[PARAMETERS] ]= identifier[params] . identifier[copy] ()
identifier[rc] [ identifier[self] . identifier[METADATA] ]= identifier[meta] . identifier[copy] ()
identifier[rc] [ identifier[self] . identifier[RESULTS] ]= identifier[res]
keyword[return] identifier[rc] | def report(self, params, meta, res):
"""Return a properly-structured dict of results. The default returns a dict with
results keyed by :attr:`Experiment.RESULTS`, the data point in the parameter space
keyed by :attr:`Experiment.PARAMETERS`, and timing and other metadata keyed
by :attr:`Experiment.METADATA`. Overriding this method can be used to record extra
values, but be sure to call the base method as well.
:param params: the parameters we ran under
:param meta: the metadata for this run
:param res: the direct experimental results from do()
:returns: a :term:`results dict`"""
rc = dict()
rc[self.PARAMETERS] = params.copy()
rc[self.METADATA] = meta.copy()
rc[self.RESULTS] = res
return rc |
def get_purge_files(root, output, output_schema, descriptor, descriptor_schema):
"""Get files to purge."""
def remove_file(fn, paths):
"""From paths remove fn and dirs before fn in dir tree."""
while fn:
for i in range(len(paths) - 1, -1, -1):
if fn == paths[i]:
paths.pop(i)
fn, _ = os.path.split(fn)
def remove_tree(fn, paths):
"""From paths remove fn and dirs before or after fn in dir tree."""
for i in range(len(paths) - 1, -1, -1):
head = paths[i]
while head:
if fn == head:
paths.pop(i)
break
head, _ = os.path.split(head)
remove_file(fn, paths)
def subfiles(root):
"""Extend unreferenced list with all subdirs and files in top dir."""
subs = []
for path, dirs, files in os.walk(root, topdown=False):
path = path[len(root) + 1:]
subs.extend(os.path.join(path, f) for f in files)
subs.extend(os.path.join(path, d) for d in dirs)
return subs
unreferenced_files = subfiles(root)
remove_file('jsonout.txt', unreferenced_files)
remove_file('stderr.txt', unreferenced_files)
remove_file('stdout.txt', unreferenced_files)
meta_fields = [
[output, output_schema],
[descriptor, descriptor_schema]
]
for meta_field, meta_field_schema in meta_fields:
for field_schema, fields in iterate_fields(meta_field, meta_field_schema):
if 'type' in field_schema:
field_type = field_schema['type']
field_name = field_schema['name']
# Remove basic:file: entries
if field_type.startswith('basic:file:'):
remove_file(fields[field_name]['file'], unreferenced_files)
# Remove list:basic:file: entries
elif field_type.startswith('list:basic:file:'):
for field in fields[field_name]:
remove_file(field['file'], unreferenced_files)
# Remove basic:dir: entries
elif field_type.startswith('basic:dir:'):
remove_tree(fields[field_name]['dir'], unreferenced_files)
# Remove list:basic:dir: entries
elif field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
remove_tree(field['dir'], unreferenced_files)
# Remove refs entries
if field_type.startswith('basic:file:') or field_type.startswith('basic:dir:'):
for ref in fields[field_name].get('refs', []):
remove_tree(ref, unreferenced_files)
elif field_type.startswith('list:basic:file:') or field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
for ref in field.get('refs', []):
remove_tree(ref, unreferenced_files)
return set([os.path.join(root, filename) for filename in unreferenced_files]) | def function[get_purge_files, parameter[root, output, output_schema, descriptor, descriptor_schema]]:
constant[Get files to purge.]
def function[remove_file, parameter[fn, paths]]:
constant[From paths remove fn and dirs before fn in dir tree.]
while name[fn] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[paths]]] - constant[1]], <ast.UnaryOp object at 0x7da1b19b6410>, <ast.UnaryOp object at 0x7da1b19b74f0>]]] begin[:]
if compare[name[fn] equal[==] call[name[paths]][name[i]]] begin[:]
call[name[paths].pop, parameter[name[i]]]
<ast.Tuple object at 0x7da1b19b4640> assign[=] call[name[os].path.split, parameter[name[fn]]]
def function[remove_tree, parameter[fn, paths]]:
constant[From paths remove fn and dirs before or after fn in dir tree.]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[paths]]] - constant[1]], <ast.UnaryOp object at 0x7da1b19b5510>, <ast.UnaryOp object at 0x7da1b19b4ac0>]]] begin[:]
variable[head] assign[=] call[name[paths]][name[i]]
while name[head] begin[:]
if compare[name[fn] equal[==] name[head]] begin[:]
call[name[paths].pop, parameter[name[i]]]
break
<ast.Tuple object at 0x7da1b19b6b00> assign[=] call[name[os].path.split, parameter[name[head]]]
call[name[remove_file], parameter[name[fn], name[paths]]]
def function[subfiles, parameter[root]]:
constant[Extend unreferenced list with all subdirs and files in top dir.]
variable[subs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b19b5930>, <ast.Name object at 0x7da1b19b5cf0>, <ast.Name object at 0x7da1b19b46a0>]]] in starred[call[name[os].walk, parameter[name[root]]]] begin[:]
variable[path] assign[=] call[name[path]][<ast.Slice object at 0x7da1b19b5330>]
call[name[subs].extend, parameter[<ast.GeneratorExp object at 0x7da1b19b79a0>]]
call[name[subs].extend, parameter[<ast.GeneratorExp object at 0x7da1b19b7e80>]]
return[name[subs]]
variable[unreferenced_files] assign[=] call[name[subfiles], parameter[name[root]]]
call[name[remove_file], parameter[constant[jsonout.txt], name[unreferenced_files]]]
call[name[remove_file], parameter[constant[stderr.txt], name[unreferenced_files]]]
call[name[remove_file], parameter[constant[stdout.txt], name[unreferenced_files]]]
variable[meta_fields] assign[=] list[[<ast.List object at 0x7da1b1af11e0>, <ast.List object at 0x7da1b1af1a20>]]
for taget[tuple[[<ast.Name object at 0x7da1b1af32b0>, <ast.Name object at 0x7da1b1af3d00>]]] in starred[name[meta_fields]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1af2b90>, <ast.Name object at 0x7da1b1af1630>]]] in starred[call[name[iterate_fields], parameter[name[meta_field], name[meta_field_schema]]]] begin[:]
if compare[constant[type] in name[field_schema]] begin[:]
variable[field_type] assign[=] call[name[field_schema]][constant[type]]
variable[field_name] assign[=] call[name[field_schema]][constant[name]]
if call[name[field_type].startswith, parameter[constant[basic:file:]]] begin[:]
call[name[remove_file], parameter[call[call[name[fields]][name[field_name]]][constant[file]], name[unreferenced_files]]]
if <ast.BoolOp object at 0x7da1b1af3280> begin[:]
for taget[name[ref]] in starred[call[call[name[fields]][name[field_name]].get, parameter[constant[refs], list[[]]]]] begin[:]
call[name[remove_tree], parameter[name[ref], name[unreferenced_files]]]
return[call[name[set], parameter[<ast.ListComp object at 0x7da1b1af1c30>]]] | keyword[def] identifier[get_purge_files] ( identifier[root] , identifier[output] , identifier[output_schema] , identifier[descriptor] , identifier[descriptor_schema] ):
literal[string]
keyword[def] identifier[remove_file] ( identifier[fn] , identifier[paths] ):
literal[string]
keyword[while] identifier[fn] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[paths] )- literal[int] ,- literal[int] ,- literal[int] ):
keyword[if] identifier[fn] == identifier[paths] [ identifier[i] ]:
identifier[paths] . identifier[pop] ( identifier[i] )
identifier[fn] , identifier[_] = identifier[os] . identifier[path] . identifier[split] ( identifier[fn] )
keyword[def] identifier[remove_tree] ( identifier[fn] , identifier[paths] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[paths] )- literal[int] ,- literal[int] ,- literal[int] ):
identifier[head] = identifier[paths] [ identifier[i] ]
keyword[while] identifier[head] :
keyword[if] identifier[fn] == identifier[head] :
identifier[paths] . identifier[pop] ( identifier[i] )
keyword[break]
identifier[head] , identifier[_] = identifier[os] . identifier[path] . identifier[split] ( identifier[head] )
identifier[remove_file] ( identifier[fn] , identifier[paths] )
keyword[def] identifier[subfiles] ( identifier[root] ):
literal[string]
identifier[subs] =[]
keyword[for] identifier[path] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[root] , identifier[topdown] = keyword[False] ):
identifier[path] = identifier[path] [ identifier[len] ( identifier[root] )+ literal[int] :]
identifier[subs] . identifier[extend] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[files] )
identifier[subs] . identifier[extend] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[dirs] )
keyword[return] identifier[subs]
identifier[unreferenced_files] = identifier[subfiles] ( identifier[root] )
identifier[remove_file] ( literal[string] , identifier[unreferenced_files] )
identifier[remove_file] ( literal[string] , identifier[unreferenced_files] )
identifier[remove_file] ( literal[string] , identifier[unreferenced_files] )
identifier[meta_fields] =[
[ identifier[output] , identifier[output_schema] ],
[ identifier[descriptor] , identifier[descriptor_schema] ]
]
keyword[for] identifier[meta_field] , identifier[meta_field_schema] keyword[in] identifier[meta_fields] :
keyword[for] identifier[field_schema] , identifier[fields] keyword[in] identifier[iterate_fields] ( identifier[meta_field] , identifier[meta_field_schema] ):
keyword[if] literal[string] keyword[in] identifier[field_schema] :
identifier[field_type] = identifier[field_schema] [ literal[string] ]
identifier[field_name] = identifier[field_schema] [ literal[string] ]
keyword[if] identifier[field_type] . identifier[startswith] ( literal[string] ):
identifier[remove_file] ( identifier[fields] [ identifier[field_name] ][ literal[string] ], identifier[unreferenced_files] )
keyword[elif] identifier[field_type] . identifier[startswith] ( literal[string] ):
keyword[for] identifier[field] keyword[in] identifier[fields] [ identifier[field_name] ]:
identifier[remove_file] ( identifier[field] [ literal[string] ], identifier[unreferenced_files] )
keyword[elif] identifier[field_type] . identifier[startswith] ( literal[string] ):
identifier[remove_tree] ( identifier[fields] [ identifier[field_name] ][ literal[string] ], identifier[unreferenced_files] )
keyword[elif] identifier[field_type] . identifier[startswith] ( literal[string] ):
keyword[for] identifier[field] keyword[in] identifier[fields] [ identifier[field_name] ]:
identifier[remove_tree] ( identifier[field] [ literal[string] ], identifier[unreferenced_files] )
keyword[if] identifier[field_type] . identifier[startswith] ( literal[string] ) keyword[or] identifier[field_type] . identifier[startswith] ( literal[string] ):
keyword[for] identifier[ref] keyword[in] identifier[fields] [ identifier[field_name] ]. identifier[get] ( literal[string] ,[]):
identifier[remove_tree] ( identifier[ref] , identifier[unreferenced_files] )
keyword[elif] identifier[field_type] . identifier[startswith] ( literal[string] ) keyword[or] identifier[field_type] . identifier[startswith] ( literal[string] ):
keyword[for] identifier[field] keyword[in] identifier[fields] [ identifier[field_name] ]:
keyword[for] identifier[ref] keyword[in] identifier[field] . identifier[get] ( literal[string] ,[]):
identifier[remove_tree] ( identifier[ref] , identifier[unreferenced_files] )
keyword[return] identifier[set] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] ) keyword[for] identifier[filename] keyword[in] identifier[unreferenced_files] ]) | def get_purge_files(root, output, output_schema, descriptor, descriptor_schema):
"""Get files to purge."""
def remove_file(fn, paths):
"""From paths remove fn and dirs before fn in dir tree."""
while fn:
for i in range(len(paths) - 1, -1, -1):
if fn == paths[i]:
paths.pop(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
(fn, _) = os.path.split(fn) # depends on [control=['while'], data=[]]
def remove_tree(fn, paths):
"""From paths remove fn and dirs before or after fn in dir tree."""
for i in range(len(paths) - 1, -1, -1):
head = paths[i]
while head:
if fn == head:
paths.pop(i)
break # depends on [control=['if'], data=[]]
(head, _) = os.path.split(head) # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['i']]
remove_file(fn, paths)
def subfiles(root):
"""Extend unreferenced list with all subdirs and files in top dir."""
subs = []
for (path, dirs, files) in os.walk(root, topdown=False):
path = path[len(root) + 1:]
subs.extend((os.path.join(path, f) for f in files))
subs.extend((os.path.join(path, d) for d in dirs)) # depends on [control=['for'], data=[]]
return subs
unreferenced_files = subfiles(root)
remove_file('jsonout.txt', unreferenced_files)
remove_file('stderr.txt', unreferenced_files)
remove_file('stdout.txt', unreferenced_files)
meta_fields = [[output, output_schema], [descriptor, descriptor_schema]]
for (meta_field, meta_field_schema) in meta_fields:
for (field_schema, fields) in iterate_fields(meta_field, meta_field_schema):
if 'type' in field_schema:
field_type = field_schema['type']
field_name = field_schema['name']
# Remove basic:file: entries
if field_type.startswith('basic:file:'):
remove_file(fields[field_name]['file'], unreferenced_files) # depends on [control=['if'], data=[]]
# Remove list:basic:file: entries
elif field_type.startswith('list:basic:file:'):
for field in fields[field_name]:
remove_file(field['file'], unreferenced_files) # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]]
# Remove basic:dir: entries
elif field_type.startswith('basic:dir:'):
remove_tree(fields[field_name]['dir'], unreferenced_files) # depends on [control=['if'], data=[]]
# Remove list:basic:dir: entries
elif field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
remove_tree(field['dir'], unreferenced_files) # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]]
# Remove refs entries
if field_type.startswith('basic:file:') or field_type.startswith('basic:dir:'):
for ref in fields[field_name].get('refs', []):
remove_tree(ref, unreferenced_files) # depends on [control=['for'], data=['ref']] # depends on [control=['if'], data=[]]
elif field_type.startswith('list:basic:file:') or field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
for ref in field.get('refs', []):
remove_tree(ref, unreferenced_files) # depends on [control=['for'], data=['ref']] # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['field_schema']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return set([os.path.join(root, filename) for filename in unreferenced_files]) |
def main(argv=None, directory=None):
    """
    Main entry point for the tool, used by setup.py
    Returns a value that can be passed into exit() specifying
    the exit code.
    1 is an error
    0 is successful run
    """
    logging.basicConfig(format='%(message)s')
    # Default to the process arguments when none are supplied explicitly.
    arg_dict = parse_quality_args((argv or sys.argv)[1:])
    GitPathTool.set_cwd(directory)
    fail_under = arg_dict.get('fail_under')
    tool = arg_dict['violations']
    user_options = arg_dict.get('options')
    if user_options:
        # strip quotes if present
        if user_options[0] == user_options[-1] and user_options[0] in ('"', "'"):
            user_options = user_options[1:-1]
    driver = QUALITY_DRIVERS.get(tool)
    if driver is None:
        LOGGER.error("Quality tool not recognized: '{}'".format(tool))
        return 1
    # If we've been given pre-generated reports,
    # try to open the files
    input_reports = []
    for path in arg_dict['input_reports']:
        try:
            input_reports.append(open(path, 'rb'))
        except IOError:
            LOGGER.warning("Could not load '{}'".format(path))
    try:
        reporter = QualityReporter(driver, input_reports, user_options)
        percent_passing = generate_quality_report(
            reporter,
            arg_dict['compare_branch'],
            html_report=arg_dict['html_report'],
            css_file=arg_dict['external_css_file'],
            ignore_staged=arg_dict['ignore_staged'],
            ignore_unstaged=arg_dict['ignore_unstaged'],
            exclude=arg_dict['exclude'],
        )
        if percent_passing >= fail_under:
            return 0
        LOGGER.error("Failure. Quality is below {}%.".format(fail_under))
        return 1
    except (ImportError, EnvironmentError):
        # The underlying quality driver (pylint, pep8, ...) is missing.
        LOGGER.error(
            "Quality tool not installed: '{}'".format(tool)
        )
        return 1
    finally:
        # Close any reports we opened
        for file_handle in input_reports:
            file_handle.close()
constant[
Main entry point for the tool, used by setup.py
Returns a value that can be passed into exit() specifying
the exit code.
1 is an error
0 is successful run
]
call[name[logging].basicConfig, parameter[]]
variable[argv] assign[=] <ast.BoolOp object at 0x7da20c9920e0>
variable[arg_dict] assign[=] call[name[parse_quality_args], parameter[call[name[argv]][<ast.Slice object at 0x7da20c9918a0>]]]
call[name[GitPathTool].set_cwd, parameter[name[directory]]]
variable[fail_under] assign[=] call[name[arg_dict].get, parameter[constant[fail_under]]]
variable[tool] assign[=] call[name[arg_dict]][constant[violations]]
variable[user_options] assign[=] call[name[arg_dict].get, parameter[constant[options]]]
if name[user_options] begin[:]
variable[first_char] assign[=] call[name[user_options]][constant[0]]
variable[last_char] assign[=] call[name[user_options]][<ast.UnaryOp object at 0x7da20c991f30>]
if <ast.BoolOp object at 0x7da20c9933a0> begin[:]
variable[user_options] assign[=] call[name[user_options]][<ast.Slice object at 0x7da20c9915a0>]
variable[driver] assign[=] call[name[QUALITY_DRIVERS].get, parameter[name[tool]]]
if compare[name[driver] is_not constant[None]] begin[:]
variable[input_reports] assign[=] list[[]]
for taget[name[path]] in starred[call[name[arg_dict]][constant[input_reports]]] begin[:]
<ast.Try object at 0x7da20c992a70>
<ast.Try object at 0x7da20c993250> | keyword[def] identifier[main] ( identifier[argv] = keyword[None] , identifier[directory] = keyword[None] ):
literal[string]
identifier[logging] . identifier[basicConfig] ( identifier[format] = literal[string] )
identifier[argv] = identifier[argv] keyword[or] identifier[sys] . identifier[argv]
identifier[arg_dict] = identifier[parse_quality_args] ( identifier[argv] [ literal[int] :])
identifier[GitPathTool] . identifier[set_cwd] ( identifier[directory] )
identifier[fail_under] = identifier[arg_dict] . identifier[get] ( literal[string] )
identifier[tool] = identifier[arg_dict] [ literal[string] ]
identifier[user_options] = identifier[arg_dict] . identifier[get] ( literal[string] )
keyword[if] identifier[user_options] :
identifier[first_char] = identifier[user_options] [ literal[int] ]
identifier[last_char] = identifier[user_options] [- literal[int] ]
keyword[if] identifier[first_char] == identifier[last_char] keyword[and] identifier[first_char] keyword[in] ( literal[string] , literal[string] ):
identifier[user_options] = identifier[user_options] [ literal[int] :- literal[int] ]
identifier[driver] = identifier[QUALITY_DRIVERS] . identifier[get] ( identifier[tool] )
keyword[if] identifier[driver] keyword[is] keyword[not] keyword[None] :
identifier[input_reports] =[]
keyword[for] identifier[path] keyword[in] identifier[arg_dict] [ literal[string] ]:
keyword[try] :
identifier[input_reports] . identifier[append] ( identifier[open] ( identifier[path] , literal[string] ))
keyword[except] identifier[IOError] :
identifier[LOGGER] . identifier[warning] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[try] :
identifier[reporter] = identifier[QualityReporter] ( identifier[driver] , identifier[input_reports] , identifier[user_options] )
identifier[percent_passing] = identifier[generate_quality_report] (
identifier[reporter] ,
identifier[arg_dict] [ literal[string] ],
identifier[html_report] = identifier[arg_dict] [ literal[string] ],
identifier[css_file] = identifier[arg_dict] [ literal[string] ],
identifier[ignore_staged] = identifier[arg_dict] [ literal[string] ],
identifier[ignore_unstaged] = identifier[arg_dict] [ literal[string] ],
identifier[exclude] = identifier[arg_dict] [ literal[string] ],
)
keyword[if] identifier[percent_passing] >= identifier[fail_under] :
keyword[return] literal[int]
keyword[else] :
identifier[LOGGER] . identifier[error] ( literal[string] . identifier[format] ( identifier[fail_under] ))
keyword[return] literal[int]
keyword[except] ( identifier[ImportError] , identifier[EnvironmentError] ):
identifier[LOGGER] . identifier[error] (
literal[string] . identifier[format] ( identifier[tool] )
)
keyword[return] literal[int]
keyword[finally] :
keyword[for] identifier[file_handle] keyword[in] identifier[input_reports] :
identifier[file_handle] . identifier[close] ()
keyword[else] :
identifier[LOGGER] . identifier[error] ( literal[string] . identifier[format] ( identifier[tool] ))
keyword[return] literal[int] | def main(argv=None, directory=None):
"""
Main entry point for the tool, used by setup.py
Returns a value that can be passed into exit() specifying
the exit code.
1 is an error
0 is successful run
"""
logging.basicConfig(format='%(message)s')
argv = argv or sys.argv
arg_dict = parse_quality_args(argv[1:])
GitPathTool.set_cwd(directory)
fail_under = arg_dict.get('fail_under')
tool = arg_dict['violations']
user_options = arg_dict.get('options')
if user_options:
# strip quotes if present
first_char = user_options[0]
last_char = user_options[-1]
if first_char == last_char and first_char in ('"', "'"):
user_options = user_options[1:-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
driver = QUALITY_DRIVERS.get(tool)
if driver is not None:
# If we've been given pre-generated reports,
# try to open the files
input_reports = []
for path in arg_dict['input_reports']:
try:
input_reports.append(open(path, 'rb')) # depends on [control=['try'], data=[]]
except IOError:
LOGGER.warning("Could not load '{}'".format(path)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['path']]
try:
reporter = QualityReporter(driver, input_reports, user_options)
percent_passing = generate_quality_report(reporter, arg_dict['compare_branch'], html_report=arg_dict['html_report'], css_file=arg_dict['external_css_file'], ignore_staged=arg_dict['ignore_staged'], ignore_unstaged=arg_dict['ignore_unstaged'], exclude=arg_dict['exclude'])
if percent_passing >= fail_under:
return 0 # depends on [control=['if'], data=[]]
else:
LOGGER.error('Failure. Quality is below {}%.'.format(fail_under))
return 1 # depends on [control=['try'], data=[]]
except (ImportError, EnvironmentError):
LOGGER.error("Quality tool not installed: '{}'".format(tool))
return 1 # depends on [control=['except'], data=[]]
finally:
# Close any reports we opened
for file_handle in input_reports:
file_handle.close() # depends on [control=['for'], data=['file_handle']] # depends on [control=['if'], data=['driver']]
else:
LOGGER.error("Quality tool not recognized: '{}'".format(tool))
return 1 |
def do_ls(self, line):
    """ls [-a] [-l] [FILE|DIRECTORY|PATTERN]...
    PATTERN supports * ? [seq] [!seq] Unix filename matching
    List directory contents.
    """
    args = self.line_to_args(line)
    # With no operands, list the current directory.
    if len(args.filenames) == 0:
        args.filenames = ['.']
    for idx, fn in enumerate(args.filenames):
        if not is_pattern(fn):
            # Literal file or directory name.
            filename = resolve_path(fn)
            # NOTE(review): auto() appears to dispatch the stat call to
            # wherever the file lives (host vs. attached board) -- confirm.
            stat = auto(get_stat, filename)
            mode = stat_mode(stat)
            if not mode_exists(mode):
                err = "Cannot access '{}': No such file or directory"
                print_err(err.format(filename))
                continue
            if not mode_isdir(mode):
                # Plain file: print it (long form if -l) and move on.
                if args.long:
                    print_long(fn, stat, self.print)
                else:
                    self.print(fn)
                continue
            # Directory: when listing several operands, print a
            # "name:" header (blank line between sections).
            if len(args.filenames) > 1:
                if idx > 0:
                    self.print('')
                self.print("%s:" % filename)
            # List every entry of the directory.
            pattern = '*'
        else:  # A pattern was specified
            # Split into the directory part and the glob part.
            filename, pattern = validate_pattern(fn)
            if filename is None:  # An error was printed
                continue
        files = []
        ldir_stat = auto(listdir_stat, filename)
        if ldir_stat is None:
            err = "Cannot access '{}': No such file or directory"
            print_err(err.format(filename))
        else:
            # Sort entries by name; note `filename` is deliberately
            # reused as the per-entry loop variable from here on.
            for filename, stat in sorted(ldir_stat,
                                         key=lambda entry: entry[0]):
                # Hidden entries are skipped unless -a was given.
                if is_visible(filename) or args.all:
                    if fnmatch.fnmatch(filename, pattern):
                        if args.long:
                            # -l prints one entry per line immediately.
                            print_long(filename, stat, self.print)
                        else:
                            files.append(decorated_filename(filename, stat))
        # Short-form entries are collected and printed in columns.
        if len(files) > 0:
            print_cols(sorted(files), self.print, self.columns)
constant[ls [-a] [-l] [FILE|DIRECTORY|PATTERN]...
PATTERN supports * ? [seq] [!seq] Unix filename matching
List directory contents.
]
variable[args] assign[=] call[name[self].line_to_args, parameter[name[line]]]
if compare[call[name[len], parameter[name[args].filenames]] equal[==] constant[0]] begin[:]
name[args].filenames assign[=] list[[<ast.Constant object at 0x7da2047eb7f0>]]
for taget[tuple[[<ast.Name object at 0x7da2047e8f40>, <ast.Name object at 0x7da2047e8100>]]] in starred[call[name[enumerate], parameter[name[args].filenames]]] begin[:]
if <ast.UnaryOp object at 0x7da2047e9990> begin[:]
variable[filename] assign[=] call[name[resolve_path], parameter[name[fn]]]
variable[stat] assign[=] call[name[auto], parameter[name[get_stat], name[filename]]]
variable[mode] assign[=] call[name[stat_mode], parameter[name[stat]]]
if <ast.UnaryOp object at 0x7da2047eb010> begin[:]
variable[err] assign[=] constant[Cannot access '{}': No such file or directory]
call[name[print_err], parameter[call[name[err].format, parameter[name[filename]]]]]
continue
if <ast.UnaryOp object at 0x7da2047eb160> begin[:]
if name[args].long begin[:]
call[name[print_long], parameter[name[fn], name[stat], name[self].print]]
continue
if compare[call[name[len], parameter[name[args].filenames]] greater[>] constant[1]] begin[:]
if compare[name[idx] greater[>] constant[0]] begin[:]
call[name[self].print, parameter[constant[]]]
call[name[self].print, parameter[binary_operation[constant[%s:] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
variable[pattern] assign[=] constant[*]
variable[files] assign[=] list[[]]
variable[ldir_stat] assign[=] call[name[auto], parameter[name[listdir_stat], name[filename]]]
if compare[name[ldir_stat] is constant[None]] begin[:]
variable[err] assign[=] constant[Cannot access '{}': No such file or directory]
call[name[print_err], parameter[call[name[err].format, parameter[name[filename]]]]]
if compare[call[name[len], parameter[name[files]]] greater[>] constant[0]] begin[:]
call[name[print_cols], parameter[call[name[sorted], parameter[name[files]]], name[self].print, name[self].columns]] | keyword[def] identifier[do_ls] ( identifier[self] , identifier[line] ):
literal[string]
identifier[args] = identifier[self] . identifier[line_to_args] ( identifier[line] )
keyword[if] identifier[len] ( identifier[args] . identifier[filenames] )== literal[int] :
identifier[args] . identifier[filenames] =[ literal[string] ]
keyword[for] identifier[idx] , identifier[fn] keyword[in] identifier[enumerate] ( identifier[args] . identifier[filenames] ):
keyword[if] keyword[not] identifier[is_pattern] ( identifier[fn] ):
identifier[filename] = identifier[resolve_path] ( identifier[fn] )
identifier[stat] = identifier[auto] ( identifier[get_stat] , identifier[filename] )
identifier[mode] = identifier[stat_mode] ( identifier[stat] )
keyword[if] keyword[not] identifier[mode_exists] ( identifier[mode] ):
identifier[err] = literal[string]
identifier[print_err] ( identifier[err] . identifier[format] ( identifier[filename] ))
keyword[continue]
keyword[if] keyword[not] identifier[mode_isdir] ( identifier[mode] ):
keyword[if] identifier[args] . identifier[long] :
identifier[print_long] ( identifier[fn] , identifier[stat] , identifier[self] . identifier[print] )
keyword[else] :
identifier[self] . identifier[print] ( identifier[fn] )
keyword[continue]
keyword[if] identifier[len] ( identifier[args] . identifier[filenames] )> literal[int] :
keyword[if] identifier[idx] > literal[int] :
identifier[self] . identifier[print] ( literal[string] )
identifier[self] . identifier[print] ( literal[string] % identifier[filename] )
identifier[pattern] = literal[string]
keyword[else] :
identifier[filename] , identifier[pattern] = identifier[validate_pattern] ( identifier[fn] )
keyword[if] identifier[filename] keyword[is] keyword[None] :
keyword[continue]
identifier[files] =[]
identifier[ldir_stat] = identifier[auto] ( identifier[listdir_stat] , identifier[filename] )
keyword[if] identifier[ldir_stat] keyword[is] keyword[None] :
identifier[err] = literal[string]
identifier[print_err] ( identifier[err] . identifier[format] ( identifier[filename] ))
keyword[else] :
keyword[for] identifier[filename] , identifier[stat] keyword[in] identifier[sorted] ( identifier[ldir_stat] ,
identifier[key] = keyword[lambda] identifier[entry] : identifier[entry] [ literal[int] ]):
keyword[if] identifier[is_visible] ( identifier[filename] ) keyword[or] identifier[args] . identifier[all] :
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[filename] , identifier[pattern] ):
keyword[if] identifier[args] . identifier[long] :
identifier[print_long] ( identifier[filename] , identifier[stat] , identifier[self] . identifier[print] )
keyword[else] :
identifier[files] . identifier[append] ( identifier[decorated_filename] ( identifier[filename] , identifier[stat] ))
keyword[if] identifier[len] ( identifier[files] )> literal[int] :
identifier[print_cols] ( identifier[sorted] ( identifier[files] ), identifier[self] . identifier[print] , identifier[self] . identifier[columns] ) | def do_ls(self, line):
"""ls [-a] [-l] [FILE|DIRECTORY|PATTERN]...
PATTERN supports * ? [seq] [!seq] Unix filename matching
List directory contents.
"""
args = self.line_to_args(line)
if len(args.filenames) == 0:
args.filenames = ['.'] # depends on [control=['if'], data=[]]
for (idx, fn) in enumerate(args.filenames):
if not is_pattern(fn):
filename = resolve_path(fn)
stat = auto(get_stat, filename)
mode = stat_mode(stat)
if not mode_exists(mode):
err = "Cannot access '{}': No such file or directory"
print_err(err.format(filename))
continue # depends on [control=['if'], data=[]]
if not mode_isdir(mode):
if args.long:
print_long(fn, stat, self.print) # depends on [control=['if'], data=[]]
else:
self.print(fn)
continue # depends on [control=['if'], data=[]]
if len(args.filenames) > 1:
if idx > 0:
self.print('') # depends on [control=['if'], data=[]]
self.print('%s:' % filename) # depends on [control=['if'], data=[]]
pattern = '*' # depends on [control=['if'], data=[]]
else: # A pattern was specified
(filename, pattern) = validate_pattern(fn)
if filename is None: # An error was printed
continue # depends on [control=['if'], data=[]]
files = []
ldir_stat = auto(listdir_stat, filename)
if ldir_stat is None:
err = "Cannot access '{}': No such file or directory"
print_err(err.format(filename)) # depends on [control=['if'], data=[]]
else:
for (filename, stat) in sorted(ldir_stat, key=lambda entry: entry[0]):
if is_visible(filename) or args.all:
if fnmatch.fnmatch(filename, pattern):
if args.long:
print_long(filename, stat, self.print) # depends on [control=['if'], data=[]]
else:
files.append(decorated_filename(filename, stat)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(files) > 0:
print_cols(sorted(files), self.print, self.columns) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def discands(record):
    """Display the candidates contained in a candidate record list.

    For each candidate, a common cutout window -- the union of the
    per-exposure windows of +/- ``width`` pixels around the measured
    position -- is displayed for every exposure, and the candidate's
    position is circled with IRAF ``tvmark``.

    record: dict with keys 'cands' (per-exposure measurements, each with
        'x', 'y' and reference 'x_0', 'y_0') and 'fileId' (exposure
        basenames; '<fileId>.fits' must exist on disk).
    Returns None; side effects only (image display plus a temporary
    'tv.coo' coordinate file that is removed afterwards).
    """
    import math
    import os
    import pyraf
    import pyfits
    pyraf.iraf.tv()
    display = pyraf.iraf.tv.display
    width = 128
    cands = record['cands']
    exps = record['fileId']
    ### load some header info from the mophead file
    headers = {}
    for exp in exps:
        f = pyfits.open(exp + ".fits")
        headers[exp] = {}
        for key in ['MJDATE', 'NAXIS1', 'NAXIS2', 'EXPTIME', 'FILTER']:
            headers[exp][key] = f[0].header[key]
        # Mid-exposure epoch: half of EXPTIME (seconds) converted to days.
        headers[exp]['MJD-OBSC'] = headers[exp]['MJDATE'] + headers[exp]['EXPTIME'] / 2.0 / 3600.0 / 24.0
        f.close()
    if not exps:
        # Nothing to display (and min()/max() below would fail).
        return
    for cand in cands:
        # Bounding box of the candidate across *all* exposures, clipped to
        # the image.  Bug fix: these accumulators used to be re-initialized
        # inside the loop, so only the last exposure ever contributed to
        # the min()/max() union below.
        x1, y1, x2, y2 = [], [], [], []
        for i in range(len(exps)):
            fileId = exps[i]
            x2.append(int(min(math.floor(cand[i]['x']) + width, headers[fileId]['NAXIS1'])))
            y2.append(int(min(math.floor(cand[i]['y']) + width, headers[fileId]['NAXIS2'])))
            x1.append(int(max(math.floor(cand[i]['x']) - width, 1)))
            y1.append(int(max(math.floor(cand[i]['y']) - width, 1)))
        x_1 = min(x1)
        y_1 = min(y1)
        x_2 = max(x2)
        y_2 = max(y2)
        for i in range(len(exps)):
            # Shift the common window by this exposure's measured offset
            # from the reference position.
            xshift = cand[i]['x'] - cand[i]['x_0']
            yshift = cand[i]['y'] - cand[i]['y_0']
            # Coordinate file consumed by tvmark; the context manager
            # guarantees it is flushed and closed before IRAF reads it.
            with open('tv.coo', 'w') as tvmark:
                tvmark.write('%f %f\n' % (cand[i]['x'], cand[i]['y']))
            cutout = "[%d:%d,%d:%d]" % (x_1 + xshift, x_2 + xshift,
                                        y_1 + yshift, y_2 + yshift)
            fileId = exps[i]
            display(fileId + cutout, i + 1)
            pyraf.iraf.tv.tvmark(i + 1, 'tv.coo', mark='circle', radii=15)
            os.unlink('tv.coo')
constant[Display the candidates contained in a candidate record list]
import module[pyraf], module[pyfits]
call[name[pyraf].iraf.tv, parameter[]]
variable[display] assign[=] name[pyraf].iraf.tv.display
variable[width] assign[=] constant[128]
variable[cands] assign[=] call[name[record]][constant[cands]]
variable[exps] assign[=] call[name[record]][constant[fileId]]
variable[headers] assign[=] dictionary[[], []]
for taget[name[exp]] in starred[name[exps]] begin[:]
variable[f] assign[=] call[name[pyfits].open, parameter[binary_operation[name[exp] + constant[.fits]]]]
call[name[headers]][name[exp]] assign[=] dictionary[[], []]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da1b1b0c220>, <ast.Constant object at 0x7da1b1b0e2f0>, <ast.Constant object at 0x7da1b1b0cac0>, <ast.Constant object at 0x7da1b1b0c880>, <ast.Constant object at 0x7da1b1b0ef20>]]] begin[:]
call[call[name[headers]][name[exp]]][name[key]] assign[=] call[call[name[f]][constant[0]].header][name[key]]
call[call[name[headers]][name[exp]]][constant[MJD-OBSC]] assign[=] binary_operation[call[call[name[headers]][name[exp]]][constant[MJDATE]] + binary_operation[binary_operation[binary_operation[call[call[name[headers]][name[exp]]][constant[EXPTIME]] / constant[2.0]] / constant[3600.0]] / constant[24.0]]]
call[name[f].close, parameter[]]
import module[math], module[os]
for taget[name[cand]] in starred[name[cands]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[exps]]]]]] begin[:]
variable[x2] assign[=] list[[]]
variable[y2] assign[=] list[[]]
variable[y1] assign[=] list[[]]
variable[x1] assign[=] list[[]]
variable[fileId] assign[=] call[name[exps]][name[i]]
call[name[x2].append, parameter[call[name[int], parameter[call[name[min], parameter[binary_operation[call[name[math].floor, parameter[call[call[name[cand]][name[i]]][constant[x]]]] + name[width]], call[call[name[headers]][name[fileId]]][constant[NAXIS1]]]]]]]]
call[name[y2].append, parameter[call[name[int], parameter[call[name[min], parameter[binary_operation[call[name[math].floor, parameter[call[call[name[cand]][name[i]]][constant[y]]]] + name[width]], call[call[name[headers]][name[fileId]]][constant[NAXIS2]]]]]]]]
call[name[x1].append, parameter[call[name[int], parameter[call[name[max], parameter[binary_operation[call[name[math].floor, parameter[call[call[name[cand]][name[i]]][constant[x]]]] - name[width]], constant[1]]]]]]]
call[name[y1].append, parameter[call[name[int], parameter[call[name[max], parameter[binary_operation[call[name[math].floor, parameter[call[call[name[cand]][name[i]]][constant[y]]]] - name[width]], constant[1]]]]]]]
variable[x_1] assign[=] call[name[min], parameter[name[x1]]]
variable[y_1] assign[=] call[name[min], parameter[name[y1]]]
variable[x_2] assign[=] call[name[max], parameter[name[x2]]]
variable[y_2] assign[=] call[name[max], parameter[name[y2]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[exps]]]]]] begin[:]
variable[tvmark] assign[=] call[name[open], parameter[constant[tv.coo], constant[w]]]
variable[xshift] assign[=] binary_operation[call[call[name[cand]][name[i]]][constant[x]] - call[call[name[cand]][name[i]]][constant[x_0]]]
variable[yshift] assign[=] binary_operation[call[call[name[cand]][name[i]]][constant[y]] - call[call[name[cand]][name[i]]][constant[y_0]]]
call[name[tvmark].write, parameter[binary_operation[constant[%f %f
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1a74130>, <ast.Subscript object at 0x7da1b1a76320>]]]]]
variable[x1] assign[=] binary_operation[name[x_1] + name[xshift]]
variable[y1] assign[=] binary_operation[name[y_1] + name[yshift]]
variable[x2] assign[=] binary_operation[name[x_2] + name[xshift]]
variable[y2] assign[=] binary_operation[name[y_2] + name[yshift]]
variable[cutout] assign[=] binary_operation[constant[[%d:%d,%d:%d]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1a74d00>, <ast.Name object at 0x7da1b1a766b0>, <ast.Name object at 0x7da1b1a75de0>, <ast.Name object at 0x7da1b1a76500>]]]
variable[fileId] assign[=] call[name[exps]][name[i]]
call[name[display], parameter[binary_operation[name[fileId] + name[cutout]], binary_operation[name[i] + constant[1]]]]
call[name[tvmark].close, parameter[]]
call[name[pyraf].iraf.tv.tvmark, parameter[binary_operation[name[i] + constant[1]], constant[tv.coo]]]
call[name[os].unlink, parameter[constant[tv.coo]]] | keyword[def] identifier[discands] ( identifier[record] ):
literal[string]
keyword[import] identifier[pyraf] , identifier[pyfits]
identifier[pyraf] . identifier[iraf] . identifier[tv] ()
identifier[display] = identifier[pyraf] . identifier[iraf] . identifier[tv] . identifier[display]
identifier[width] = literal[int]
identifier[cands] = identifier[record] [ literal[string] ]
identifier[exps] = identifier[record] [ literal[string] ]
identifier[headers] ={}
keyword[for] identifier[exp] keyword[in] identifier[exps] :
identifier[f] = identifier[pyfits] . identifier[open] ( identifier[exp] + literal[string] )
identifier[headers] [ identifier[exp] ]={}
keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[headers] [ identifier[exp] ][ identifier[key] ]= identifier[f] [ literal[int] ]. identifier[header] [ identifier[key] ]
identifier[headers] [ identifier[exp] ][ literal[string] ]= identifier[headers] [ identifier[exp] ][ literal[string] ]+ identifier[headers] [ identifier[exp] ][ literal[string] ]/ literal[int] / literal[int] / literal[int]
identifier[f] . identifier[close] ()
keyword[import] identifier[math] , identifier[os]
keyword[for] identifier[cand] keyword[in] identifier[cands] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[exps] )):
identifier[x2] =[]
identifier[y2] =[]
identifier[y1] =[]
identifier[x1] =[]
identifier[fileId] = identifier[exps] [ identifier[i] ]
identifier[x2] . identifier[append] ( identifier[int] ( identifier[min] ( identifier[math] . identifier[floor] ( identifier[cand] [ identifier[i] ][ literal[string] ])+ identifier[width] , identifier[headers] [ identifier[fileId] ][ literal[string] ])))
identifier[y2] . identifier[append] ( identifier[int] ( identifier[min] ( identifier[math] . identifier[floor] ( identifier[cand] [ identifier[i] ][ literal[string] ])+ identifier[width] , identifier[headers] [ identifier[fileId] ][ literal[string] ])))
identifier[x1] . identifier[append] ( identifier[int] ( identifier[max] ( identifier[math] . identifier[floor] ( identifier[cand] [ identifier[i] ][ literal[string] ])- identifier[width] , literal[int] )))
identifier[y1] . identifier[append] ( identifier[int] ( identifier[max] ( identifier[math] . identifier[floor] ( identifier[cand] [ identifier[i] ][ literal[string] ])- identifier[width] , literal[int] )))
identifier[x_1] = identifier[min] ( identifier[x1] )
identifier[y_1] = identifier[min] ( identifier[y1] )
identifier[x_2] = identifier[max] ( identifier[x2] )
identifier[y_2] = identifier[max] ( identifier[y2] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[exps] )):
identifier[tvmark] = identifier[open] ( literal[string] , literal[string] )
identifier[xshift] = identifier[cand] [ identifier[i] ][ literal[string] ]- identifier[cand] [ identifier[i] ][ literal[string] ]
identifier[yshift] = identifier[cand] [ identifier[i] ][ literal[string] ]- identifier[cand] [ identifier[i] ][ literal[string] ]
identifier[tvmark] . identifier[write] ( literal[string] %( identifier[cand] [ identifier[i] ][ literal[string] ], identifier[cand] [ identifier[i] ][ literal[string] ]))
identifier[x1] = identifier[x_1] + identifier[xshift]
identifier[y1] = identifier[y_1] + identifier[yshift]
identifier[x2] = identifier[x_2] + identifier[xshift]
identifier[y2] = identifier[y_2] + identifier[yshift]
identifier[cutout] = literal[string] %( identifier[x1] , identifier[x2] , identifier[y1] , identifier[y2] )
identifier[fileId] = identifier[exps] [ identifier[i] ]
identifier[display] ( identifier[fileId] + identifier[cutout] , identifier[i] + literal[int] )
identifier[tvmark] . identifier[close] ()
identifier[pyraf] . identifier[iraf] . identifier[tv] . identifier[tvmark] ( identifier[i] + literal[int] , literal[string] , identifier[mark] = literal[string] , identifier[radii] = literal[int] )
identifier[os] . identifier[unlink] ( literal[string] ) | def discands(record):
"""Display the candidates contained in a candidate record list"""
import pyraf, pyfits
pyraf.iraf.tv()
display = pyraf.iraf.tv.display
width = 128
cands = record['cands']
exps = record['fileId']
### load some header info from the mophead file
headers = {}
for exp in exps:
f = pyfits.open(exp + '.fits')
headers[exp] = {}
for key in ['MJDATE', 'NAXIS1', 'NAXIS2', 'EXPTIME', 'FILTER']:
headers[exp][key] = f[0].header[key] # depends on [control=['for'], data=['key']]
headers[exp]['MJD-OBSC'] = headers[exp]['MJDATE'] + headers[exp]['EXPTIME'] / 2.0 / 3600.0 / 24.0
f.close() # depends on [control=['for'], data=['exp']]
import math, os
for cand in cands:
for i in range(len(exps)):
x2 = []
y2 = []
y1 = []
x1 = []
fileId = exps[i]
x2.append(int(min(math.floor(cand[i]['x']) + width, headers[fileId]['NAXIS1'])))
y2.append(int(min(math.floor(cand[i]['y']) + width, headers[fileId]['NAXIS2'])))
x1.append(int(max(math.floor(cand[i]['x']) - width, 1)))
y1.append(int(max(math.floor(cand[i]['y']) - width, 1))) # depends on [control=['for'], data=['i']]
x_1 = min(x1)
y_1 = min(y1)
x_2 = max(x2)
y_2 = max(y2)
for i in range(len(exps)):
tvmark = open('tv.coo', 'w')
xshift = cand[i]['x'] - cand[i]['x_0']
yshift = cand[i]['y'] - cand[i]['y_0']
tvmark.write('%f %f\n' % (cand[i]['x'], cand[i]['y']))
x1 = x_1 + xshift
y1 = y_1 + yshift
x2 = x_2 + xshift
y2 = y_2 + yshift
cutout = '[%d:%d,%d:%d]' % (x1, x2, y1, y2)
fileId = exps[i]
display(fileId + cutout, i + 1)
tvmark.close()
pyraf.iraf.tv.tvmark(i + 1, 'tv.coo', mark='circle', radii=15)
os.unlink('tv.coo') # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['cand']] |
def from_xarray(da, crs=None, apply_transform=False, nan_nodata=False, **kwargs):
"""
Returns an RGB or Image element given an xarray DataArray
loaded using xr.open_rasterio.
If a crs attribute is present on the loaded data it will
attempt to decode it into a cartopy projection otherwise it
will default to a non-geographic HoloViews element.
Parameters
----------
da: xarray.DataArray
DataArray to convert to element
crs: Cartopy CRS or EPSG string (optional)
Overrides CRS inferred from the data
apply_transform: boolean
Whether to apply affine transform if defined on the data
nan_nodata: boolean
If data contains nodata values convert them to NaNs
**kwargs:
Keyword arguments passed to the HoloViews/GeoViews element
Returns
-------
element: Image/RGB/QuadMesh element
"""
if crs:
kwargs['crs'] = crs
elif hasattr(da, 'crs'):
try:
kwargs['crs'] = process_crs(da.crs)
except:
param.main.warning('Could not decode projection from crs string %r, '
'defaulting to non-geographic element.' % da.crs)
coords = list(da.coords)
if coords not in (['band', 'y', 'x'], ['y', 'x']):
from .element.geo import Dataset, HvDataset
el = Dataset if 'crs' in kwargs else HvDataset
return el(da, **kwargs)
if len(coords) == 2:
y, x = coords
bands = 1
else:
y, x = coords[1:]
bands = len(da.coords[coords[0]])
if apply_transform:
from affine import Affine
transform = Affine.from_gdal(*da.attrs['transform'][:6])
nx, ny = da.sizes[x], da.sizes[y]
xs, ys = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform
data = (xs, ys)
else:
xres, yres = da.attrs['res'] if 'res' in da.attrs else (1, 1)
xs = da.coords[x][::-1] if xres < 0 else da.coords[x]
ys = da.coords[y][::-1] if yres < 0 else da.coords[y]
data = (xs, ys)
for b in range(bands):
values = da[b].values
if nan_nodata and da.attrs.get('nodatavals', []):
values = values.astype(float)
for d in da.attrs['nodatavals']:
values[values==d] = np.NaN
data += (values,)
if 'datatype' not in kwargs:
kwargs['datatype'] = ['xarray', 'grid', 'image']
if xs.ndim > 1:
from .element.geo import QuadMesh, HvQuadMesh
el = QuadMesh if 'crs' in kwargs else HvQuadMesh
el = el(data, [x, y], **kwargs)
elif bands < 3:
from .element.geo import Image, HvImage
el = Image if 'crs' in kwargs else HvImage
el = el(data, [x, y], **kwargs)
else:
from .element.geo import RGB, HvRGB
el = RGB if 'crs' in kwargs else HvRGB
vdims = el.vdims[:bands]
el = el(data, [x, y], vdims, **kwargs)
if hasattr(el.data, 'attrs'):
el.data.attrs = da.attrs
return el | def function[from_xarray, parameter[da, crs, apply_transform, nan_nodata]]:
constant[
Returns an RGB or Image element given an xarray DataArray
loaded using xr.open_rasterio.
If a crs attribute is present on the loaded data it will
attempt to decode it into a cartopy projection otherwise it
will default to a non-geographic HoloViews element.
Parameters
----------
da: xarray.DataArray
DataArray to convert to element
crs: Cartopy CRS or EPSG string (optional)
Overrides CRS inferred from the data
apply_transform: boolean
Whether to apply affine transform if defined on the data
nan_nodata: boolean
If data contains nodata values convert them to NaNs
**kwargs:
Keyword arguments passed to the HoloViews/GeoViews element
Returns
-------
element: Image/RGB/QuadMesh element
]
if name[crs] begin[:]
call[name[kwargs]][constant[crs]] assign[=] name[crs]
variable[coords] assign[=] call[name[list], parameter[name[da].coords]]
if compare[name[coords] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.List object at 0x7da1b07b3d30>, <ast.List object at 0x7da1b07b14e0>]]] begin[:]
from relative_module[element.geo] import module[Dataset], module[HvDataset]
variable[el] assign[=] <ast.IfExp object at 0x7da1b07b2860>
return[call[name[el], parameter[name[da]]]]
if compare[call[name[len], parameter[name[coords]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da1b07b19f0> assign[=] name[coords]
variable[bands] assign[=] constant[1]
if name[apply_transform] begin[:]
from relative_module[affine] import module[Affine]
variable[transform] assign[=] call[name[Affine].from_gdal, parameter[<ast.Starred object at 0x7da1b07b23e0>]]
<ast.Tuple object at 0x7da1b07b1ea0> assign[=] tuple[[<ast.Subscript object at 0x7da1b07b1960>, <ast.Subscript object at 0x7da1b07b2b90>]]
<ast.Tuple object at 0x7da1b07b0310> assign[=] binary_operation[call[name[np].meshgrid, parameter[binary_operation[call[name[np].arange, parameter[name[nx]]] + constant[0.5]], binary_operation[call[name[np].arange, parameter[name[ny]]] + constant[0.5]]]] * name[transform]]
variable[data] assign[=] tuple[[<ast.Name object at 0x7da1b07b0ca0>, <ast.Name object at 0x7da1b07b2110>]]
variable[data] assign[=] tuple[[<ast.Name object at 0x7da1b07b1510>, <ast.Name object at 0x7da1b07b21d0>]]
for taget[name[b]] in starred[call[name[range], parameter[name[bands]]]] begin[:]
variable[values] assign[=] call[name[da]][name[b]].values
if <ast.BoolOp object at 0x7da1b07b3ac0> begin[:]
variable[values] assign[=] call[name[values].astype, parameter[name[float]]]
for taget[name[d]] in starred[call[name[da].attrs][constant[nodatavals]]] begin[:]
call[name[values]][compare[name[values] equal[==] name[d]]] assign[=] name[np].NaN
<ast.AugAssign object at 0x7da1b07b3160>
if compare[constant[datatype] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[datatype]] assign[=] list[[<ast.Constant object at 0x7da1b07b3be0>, <ast.Constant object at 0x7da1b07b3280>, <ast.Constant object at 0x7da1b07b1930>]]
if compare[name[xs].ndim greater[>] constant[1]] begin[:]
from relative_module[element.geo] import module[QuadMesh], module[HvQuadMesh]
variable[el] assign[=] <ast.IfExp object at 0x7da1b07b0190>
variable[el] assign[=] call[name[el], parameter[name[data], list[[<ast.Name object at 0x7da1b08323b0>, <ast.Name object at 0x7da1b0830f10>]]]]
if call[name[hasattr], parameter[name[el].data, constant[attrs]]] begin[:]
name[el].data.attrs assign[=] name[da].attrs
return[name[el]] | keyword[def] identifier[from_xarray] ( identifier[da] , identifier[crs] = keyword[None] , identifier[apply_transform] = keyword[False] , identifier[nan_nodata] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[crs] :
identifier[kwargs] [ literal[string] ]= identifier[crs]
keyword[elif] identifier[hasattr] ( identifier[da] , literal[string] ):
keyword[try] :
identifier[kwargs] [ literal[string] ]= identifier[process_crs] ( identifier[da] . identifier[crs] )
keyword[except] :
identifier[param] . identifier[main] . identifier[warning] ( literal[string]
literal[string] % identifier[da] . identifier[crs] )
identifier[coords] = identifier[list] ( identifier[da] . identifier[coords] )
keyword[if] identifier[coords] keyword[not] keyword[in] ([ literal[string] , literal[string] , literal[string] ],[ literal[string] , literal[string] ]):
keyword[from] . identifier[element] . identifier[geo] keyword[import] identifier[Dataset] , identifier[HvDataset]
identifier[el] = identifier[Dataset] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] identifier[HvDataset]
keyword[return] identifier[el] ( identifier[da] ,** identifier[kwargs] )
keyword[if] identifier[len] ( identifier[coords] )== literal[int] :
identifier[y] , identifier[x] = identifier[coords]
identifier[bands] = literal[int]
keyword[else] :
identifier[y] , identifier[x] = identifier[coords] [ literal[int] :]
identifier[bands] = identifier[len] ( identifier[da] . identifier[coords] [ identifier[coords] [ literal[int] ]])
keyword[if] identifier[apply_transform] :
keyword[from] identifier[affine] keyword[import] identifier[Affine]
identifier[transform] = identifier[Affine] . identifier[from_gdal] (* identifier[da] . identifier[attrs] [ literal[string] ][: literal[int] ])
identifier[nx] , identifier[ny] = identifier[da] . identifier[sizes] [ identifier[x] ], identifier[da] . identifier[sizes] [ identifier[y] ]
identifier[xs] , identifier[ys] = identifier[np] . identifier[meshgrid] ( identifier[np] . identifier[arange] ( identifier[nx] )+ literal[int] , identifier[np] . identifier[arange] ( identifier[ny] )+ literal[int] )* identifier[transform]
identifier[data] =( identifier[xs] , identifier[ys] )
keyword[else] :
identifier[xres] , identifier[yres] = identifier[da] . identifier[attrs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[da] . identifier[attrs] keyword[else] ( literal[int] , literal[int] )
identifier[xs] = identifier[da] . identifier[coords] [ identifier[x] ][::- literal[int] ] keyword[if] identifier[xres] < literal[int] keyword[else] identifier[da] . identifier[coords] [ identifier[x] ]
identifier[ys] = identifier[da] . identifier[coords] [ identifier[y] ][::- literal[int] ] keyword[if] identifier[yres] < literal[int] keyword[else] identifier[da] . identifier[coords] [ identifier[y] ]
identifier[data] =( identifier[xs] , identifier[ys] )
keyword[for] identifier[b] keyword[in] identifier[range] ( identifier[bands] ):
identifier[values] = identifier[da] [ identifier[b] ]. identifier[values]
keyword[if] identifier[nan_nodata] keyword[and] identifier[da] . identifier[attrs] . identifier[get] ( literal[string] ,[]):
identifier[values] = identifier[values] . identifier[astype] ( identifier[float] )
keyword[for] identifier[d] keyword[in] identifier[da] . identifier[attrs] [ literal[string] ]:
identifier[values] [ identifier[values] == identifier[d] ]= identifier[np] . identifier[NaN]
identifier[data] +=( identifier[values] ,)
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]=[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[xs] . identifier[ndim] > literal[int] :
keyword[from] . identifier[element] . identifier[geo] keyword[import] identifier[QuadMesh] , identifier[HvQuadMesh]
identifier[el] = identifier[QuadMesh] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] identifier[HvQuadMesh]
identifier[el] = identifier[el] ( identifier[data] ,[ identifier[x] , identifier[y] ],** identifier[kwargs] )
keyword[elif] identifier[bands] < literal[int] :
keyword[from] . identifier[element] . identifier[geo] keyword[import] identifier[Image] , identifier[HvImage]
identifier[el] = identifier[Image] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] identifier[HvImage]
identifier[el] = identifier[el] ( identifier[data] ,[ identifier[x] , identifier[y] ],** identifier[kwargs] )
keyword[else] :
keyword[from] . identifier[element] . identifier[geo] keyword[import] identifier[RGB] , identifier[HvRGB]
identifier[el] = identifier[RGB] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] identifier[HvRGB]
identifier[vdims] = identifier[el] . identifier[vdims] [: identifier[bands] ]
identifier[el] = identifier[el] ( identifier[data] ,[ identifier[x] , identifier[y] ], identifier[vdims] ,** identifier[kwargs] )
keyword[if] identifier[hasattr] ( identifier[el] . identifier[data] , literal[string] ):
identifier[el] . identifier[data] . identifier[attrs] = identifier[da] . identifier[attrs]
keyword[return] identifier[el] | def from_xarray(da, crs=None, apply_transform=False, nan_nodata=False, **kwargs):
"""
Returns an RGB or Image element given an xarray DataArray
loaded using xr.open_rasterio.
If a crs attribute is present on the loaded data it will
attempt to decode it into a cartopy projection otherwise it
will default to a non-geographic HoloViews element.
Parameters
----------
da: xarray.DataArray
DataArray to convert to element
crs: Cartopy CRS or EPSG string (optional)
Overrides CRS inferred from the data
apply_transform: boolean
Whether to apply affine transform if defined on the data
nan_nodata: boolean
If data contains nodata values convert them to NaNs
**kwargs:
Keyword arguments passed to the HoloViews/GeoViews element
Returns
-------
element: Image/RGB/QuadMesh element
"""
if crs:
kwargs['crs'] = crs # depends on [control=['if'], data=[]]
elif hasattr(da, 'crs'):
try:
kwargs['crs'] = process_crs(da.crs) # depends on [control=['try'], data=[]]
except:
param.main.warning('Could not decode projection from crs string %r, defaulting to non-geographic element.' % da.crs) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
coords = list(da.coords)
if coords not in (['band', 'y', 'x'], ['y', 'x']):
from .element.geo import Dataset, HvDataset
el = Dataset if 'crs' in kwargs else HvDataset
return el(da, **kwargs) # depends on [control=['if'], data=[]]
if len(coords) == 2:
(y, x) = coords
bands = 1 # depends on [control=['if'], data=[]]
else:
(y, x) = coords[1:]
bands = len(da.coords[coords[0]])
if apply_transform:
from affine import Affine
transform = Affine.from_gdal(*da.attrs['transform'][:6])
(nx, ny) = (da.sizes[x], da.sizes[y])
(xs, ys) = np.meshgrid(np.arange(nx) + 0.5, np.arange(ny) + 0.5) * transform
data = (xs, ys) # depends on [control=['if'], data=[]]
else:
(xres, yres) = da.attrs['res'] if 'res' in da.attrs else (1, 1)
xs = da.coords[x][::-1] if xres < 0 else da.coords[x]
ys = da.coords[y][::-1] if yres < 0 else da.coords[y]
data = (xs, ys)
for b in range(bands):
values = da[b].values
if nan_nodata and da.attrs.get('nodatavals', []):
values = values.astype(float)
for d in da.attrs['nodatavals']:
values[values == d] = np.NaN # depends on [control=['for'], data=['d']] # depends on [control=['if'], data=[]]
data += (values,) # depends on [control=['for'], data=['b']]
if 'datatype' not in kwargs:
kwargs['datatype'] = ['xarray', 'grid', 'image'] # depends on [control=['if'], data=['kwargs']]
if xs.ndim > 1:
from .element.geo import QuadMesh, HvQuadMesh
el = QuadMesh if 'crs' in kwargs else HvQuadMesh
el = el(data, [x, y], **kwargs) # depends on [control=['if'], data=[]]
elif bands < 3:
from .element.geo import Image, HvImage
el = Image if 'crs' in kwargs else HvImage
el = el(data, [x, y], **kwargs) # depends on [control=['if'], data=[]]
else:
from .element.geo import RGB, HvRGB
el = RGB if 'crs' in kwargs else HvRGB
vdims = el.vdims[:bands]
el = el(data, [x, y], vdims, **kwargs)
if hasattr(el.data, 'attrs'):
el.data.attrs = da.attrs # depends on [control=['if'], data=[]]
return el |
def reset(self):
"""
Clears all entries.
:return: None
"""
for i in range(len(self.values)):
self.values[i].delete(0, tk.END)
if self.defaults[i] is not None:
self.values[i].insert(0, self.defaults[i]) | def function[reset, parameter[self]]:
constant[
Clears all entries.
:return: None
]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].values]]]]] begin[:]
call[call[name[self].values][name[i]].delete, parameter[constant[0], name[tk].END]]
if compare[call[name[self].defaults][name[i]] is_not constant[None]] begin[:]
call[call[name[self].values][name[i]].insert, parameter[constant[0], call[name[self].defaults][name[i]]]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[values] )):
identifier[self] . identifier[values] [ identifier[i] ]. identifier[delete] ( literal[int] , identifier[tk] . identifier[END] )
keyword[if] identifier[self] . identifier[defaults] [ identifier[i] ] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[values] [ identifier[i] ]. identifier[insert] ( literal[int] , identifier[self] . identifier[defaults] [ identifier[i] ]) | def reset(self):
"""
Clears all entries.
:return: None
"""
for i in range(len(self.values)):
self.values[i].delete(0, tk.END)
if self.defaults[i] is not None:
self.values[i].insert(0, self.defaults[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] |
def _convert_list_to_json(array):
""" Converts array to a json string """
return json.dumps(array, skipkeys=False, allow_nan=False, indent=None, separators=(",", ":")) | def function[_convert_list_to_json, parameter[array]]:
constant[ Converts array to a json string ]
return[call[name[json].dumps, parameter[name[array]]]] | keyword[def] identifier[_convert_list_to_json] ( identifier[array] ):
literal[string]
keyword[return] identifier[json] . identifier[dumps] ( identifier[array] , identifier[skipkeys] = keyword[False] , identifier[allow_nan] = keyword[False] , identifier[indent] = keyword[None] , identifier[separators] =( literal[string] , literal[string] )) | def _convert_list_to_json(array):
""" Converts array to a json string """
return json.dumps(array, skipkeys=False, allow_nan=False, indent=None, separators=(',', ':')) |
def verify_user_alias(user, token):
"""
Marks a user's contact point as verified depending on accepted token type.
"""
if token.to_alias_type == 'EMAIL':
if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME):
setattr(user, api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME, True)
elif token.to_alias_type == 'MOBILE':
if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME):
setattr(user, api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME, True)
else:
return False
user.save()
return True | def function[verify_user_alias, parameter[user, token]]:
constant[
Marks a user's contact point as verified depending on accepted token type.
]
if compare[name[token].to_alias_type equal[==] constant[EMAIL]] begin[:]
if compare[name[token].to_alias equal[==] call[name[getattr], parameter[name[user], name[api_settings].PASSWORDLESS_USER_EMAIL_FIELD_NAME]]] begin[:]
call[name[setattr], parameter[name[user], name[api_settings].PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME, constant[True]]]
call[name[user].save, parameter[]]
return[constant[True]] | keyword[def] identifier[verify_user_alias] ( identifier[user] , identifier[token] ):
literal[string]
keyword[if] identifier[token] . identifier[to_alias_type] == literal[string] :
keyword[if] identifier[token] . identifier[to_alias] == identifier[getattr] ( identifier[user] , identifier[api_settings] . identifier[PASSWORDLESS_USER_EMAIL_FIELD_NAME] ):
identifier[setattr] ( identifier[user] , identifier[api_settings] . identifier[PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME] , keyword[True] )
keyword[elif] identifier[token] . identifier[to_alias_type] == literal[string] :
keyword[if] identifier[token] . identifier[to_alias] == identifier[getattr] ( identifier[user] , identifier[api_settings] . identifier[PASSWORDLESS_USER_MOBILE_FIELD_NAME] ):
identifier[setattr] ( identifier[user] , identifier[api_settings] . identifier[PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME] , keyword[True] )
keyword[else] :
keyword[return] keyword[False]
identifier[user] . identifier[save] ()
keyword[return] keyword[True] | def verify_user_alias(user, token):
"""
Marks a user's contact point as verified depending on accepted token type.
"""
if token.to_alias_type == 'EMAIL':
if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME):
setattr(user, api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME, True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif token.to_alias_type == 'MOBILE':
if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME):
setattr(user, api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME, True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
return False
user.save()
return True |
def _set_rock_ridge(self, rr):
# type: (str) -> None
'''
An internal method to set the Rock Ridge version of the ISO given the
Rock Ridge version of the previous entry.
Parameters:
rr - The version of rr from the last directory record.
Returns:
Nothing.
'''
# We don't allow mixed Rock Ridge versions on the ISO, so apply some
# checking. If the current overall Rock Ridge version on the ISO is
# None, we upgrade it to whatever version we were given. Once we have
# seen a particular version, we only allow records of that version or
# None (to account for dotdot records which have no Rock Ridge).
if not self.rock_ridge:
self.rock_ridge = rr # type: str
else:
for ver in ['1.09', '1.10', '1.12']:
if self.rock_ridge == ver:
if rr and rr != ver:
raise pycdlibexception.PyCdlibInvalidISO('Inconsistent Rock Ridge versions on the ISO!') | def function[_set_rock_ridge, parameter[self, rr]]:
constant[
An internal method to set the Rock Ridge version of the ISO given the
Rock Ridge version of the previous entry.
Parameters:
rr - The version of rr from the last directory record.
Returns:
Nothing.
]
if <ast.UnaryOp object at 0x7da18bc70190> begin[:]
name[self].rock_ridge assign[=] name[rr] | keyword[def] identifier[_set_rock_ridge] ( identifier[self] , identifier[rr] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[rock_ridge] :
identifier[self] . identifier[rock_ridge] = identifier[rr]
keyword[else] :
keyword[for] identifier[ver] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[self] . identifier[rock_ridge] == identifier[ver] :
keyword[if] identifier[rr] keyword[and] identifier[rr] != identifier[ver] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidISO] ( literal[string] ) | def _set_rock_ridge(self, rr):
# type: (str) -> None
'\n An internal method to set the Rock Ridge version of the ISO given the\n Rock Ridge version of the previous entry.\n\n Parameters:\n rr - The version of rr from the last directory record.\n Returns:\n Nothing.\n '
# We don't allow mixed Rock Ridge versions on the ISO, so apply some
# checking. If the current overall Rock Ridge version on the ISO is
# None, we upgrade it to whatever version we were given. Once we have
# seen a particular version, we only allow records of that version or
# None (to account for dotdot records which have no Rock Ridge).
if not self.rock_ridge:
self.rock_ridge = rr # type: str # depends on [control=['if'], data=[]]
else:
for ver in ['1.09', '1.10', '1.12']:
if self.rock_ridge == ver:
if rr and rr != ver:
raise pycdlibexception.PyCdlibInvalidISO('Inconsistent Rock Ridge versions on the ISO!') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['ver']] # depends on [control=['for'], data=['ver']] |
def new_values(self):
'''Returns the new values from the diff'''
def get_new_values_and_key(item):
values = item.new_values
if item.past_dict:
values.update({self._key: item.past_dict[self._key]})
else:
# This is a new item as it has no past_dict
values.update({self._key: item.current_dict[self._key]})
return values
return [get_new_values_and_key(el)
for el in self._get_recursive_difference('all')
if el.diffs and el.current_dict] | def function[new_values, parameter[self]]:
constant[Returns the new values from the diff]
def function[get_new_values_and_key, parameter[item]]:
variable[values] assign[=] name[item].new_values
if name[item].past_dict begin[:]
call[name[values].update, parameter[dictionary[[<ast.Attribute object at 0x7da1b1f37940>], [<ast.Subscript object at 0x7da1b1f35ab0>]]]]
return[name[values]]
return[<ast.ListComp object at 0x7da1b1f37340>] | keyword[def] identifier[new_values] ( identifier[self] ):
literal[string]
keyword[def] identifier[get_new_values_and_key] ( identifier[item] ):
identifier[values] = identifier[item] . identifier[new_values]
keyword[if] identifier[item] . identifier[past_dict] :
identifier[values] . identifier[update] ({ identifier[self] . identifier[_key] : identifier[item] . identifier[past_dict] [ identifier[self] . identifier[_key] ]})
keyword[else] :
identifier[values] . identifier[update] ({ identifier[self] . identifier[_key] : identifier[item] . identifier[current_dict] [ identifier[self] . identifier[_key] ]})
keyword[return] identifier[values]
keyword[return] [ identifier[get_new_values_and_key] ( identifier[el] )
keyword[for] identifier[el] keyword[in] identifier[self] . identifier[_get_recursive_difference] ( literal[string] )
keyword[if] identifier[el] . identifier[diffs] keyword[and] identifier[el] . identifier[current_dict] ] | def new_values(self):
"""Returns the new values from the diff"""
def get_new_values_and_key(item):
values = item.new_values
if item.past_dict:
values.update({self._key: item.past_dict[self._key]}) # depends on [control=['if'], data=[]]
else:
# This is a new item as it has no past_dict
values.update({self._key: item.current_dict[self._key]})
return values
return [get_new_values_and_key(el) for el in self._get_recursive_difference('all') if el.diffs and el.current_dict] |
def remove_hooks(target, **hooks):
"""
Remove the given hooks from the given target.
:param target: The object from which to remove hooks. If all hooks are removed from a given method, the
HookedMethod object will be removed and replaced with the original function.
:param hooks: Any keywords will be interpreted as hooks to remove. You must provide the exact hook that was applied
so that it can it can be identified for removal among any other hooks.
"""
for name, hook in hooks.items():
hooked = getattr(target, name)
if hook in hooked.pending:
try:
hooked.pending.remove(hook)
except ValueError as e:
raise ValueError("%s is not hooked by %s" % (target, hook)) from e
if not hooked.pending:
setattr(target, name, hooked.func) | def function[remove_hooks, parameter[target]]:
constant[
Remove the given hooks from the given target.
:param target: The object from which to remove hooks. If all hooks are removed from a given method, the
HookedMethod object will be removed and replaced with the original function.
:param hooks: Any keywords will be interpreted as hooks to remove. You must provide the exact hook that was applied
so that it can it can be identified for removal among any other hooks.
]
for taget[tuple[[<ast.Name object at 0x7da18bc70f40>, <ast.Name object at 0x7da18bc737f0>]]] in starred[call[name[hooks].items, parameter[]]] begin[:]
variable[hooked] assign[=] call[name[getattr], parameter[name[target], name[name]]]
if compare[name[hook] in name[hooked].pending] begin[:]
<ast.Try object at 0x7da18c4cd7b0>
if <ast.UnaryOp object at 0x7da18c4cd2d0> begin[:]
call[name[setattr], parameter[name[target], name[name], name[hooked].func]] | keyword[def] identifier[remove_hooks] ( identifier[target] ,** identifier[hooks] ):
literal[string]
keyword[for] identifier[name] , identifier[hook] keyword[in] identifier[hooks] . identifier[items] ():
identifier[hooked] = identifier[getattr] ( identifier[target] , identifier[name] )
keyword[if] identifier[hook] keyword[in] identifier[hooked] . identifier[pending] :
keyword[try] :
identifier[hooked] . identifier[pending] . identifier[remove] ( identifier[hook] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[target] , identifier[hook] )) keyword[from] identifier[e]
keyword[if] keyword[not] identifier[hooked] . identifier[pending] :
identifier[setattr] ( identifier[target] , identifier[name] , identifier[hooked] . identifier[func] ) | def remove_hooks(target, **hooks):
"""
Remove the given hooks from the given target.
:param target: The object from which to remove hooks. If all hooks are removed from a given method, the
HookedMethod object will be removed and replaced with the original function.
:param hooks: Any keywords will be interpreted as hooks to remove. You must provide the exact hook that was applied
so that it can it can be identified for removal among any other hooks.
"""
for (name, hook) in hooks.items():
hooked = getattr(target, name)
if hook in hooked.pending:
try:
hooked.pending.remove(hook) # depends on [control=['try'], data=[]]
except ValueError as e:
raise ValueError('%s is not hooked by %s' % (target, hook)) from e # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['hook']]
if not hooked.pending:
setattr(target, name, hooked.func) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def get_all_names_page(offset, count, include_expired=False, hostport=None, proxy=None):
"""
get a page of all the names
Returns the list of names on success
Returns {'error': ...} on error
"""
assert proxy or hostport, 'Need proxy or hostport'
if proxy is None:
proxy = connect_hostport(hostport)
page_schema = {
'type': 'object',
'properties': {
'names': {
'type': 'array',
'items': {
'type': 'string',
'uniqueItems': True
},
},
},
'required': [
'names',
],
}
schema = json_response_schema(page_schema)
try:
assert count <= 100, 'Page too big: {}'.format(count)
except AssertionError as ae:
if BLOCKSTACK_DEBUG:
log.exception(ae)
return {'error': 'Invalid page', 'http_status': 400}
resp = {}
try:
if include_expired:
resp = proxy.get_all_names_cumulative(offset, count)
else:
resp = proxy.get_all_names(offset, count)
resp = json_validate(schema, resp)
if json_is_error(resp):
return resp
# must be valid names
valid_names = []
for n in resp['names']:
if not is_name_valid(str(n)):
log.error('Invalid name "{}"'.format(str(n)))
else:
valid_names.append(n)
resp['names'] = valid_names
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp
return resp['names'] | def function[get_all_names_page, parameter[offset, count, include_expired, hostport, proxy]]:
constant[
get a page of all the names
Returns the list of names on success
Returns {'error': ...} on error
]
assert[<ast.BoolOp object at 0x7da18bc71870>]
if compare[name[proxy] is constant[None]] begin[:]
variable[proxy] assign[=] call[name[connect_hostport], parameter[name[hostport]]]
variable[page_schema] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73610>, <ast.Constant object at 0x7da18bc70f40>, <ast.Constant object at 0x7da18bc728c0>], [<ast.Constant object at 0x7da18bc73940>, <ast.Dict object at 0x7da18bc712a0>, <ast.List object at 0x7da18bc71d80>]]
variable[schema] assign[=] call[name[json_response_schema], parameter[name[page_schema]]]
<ast.Try object at 0x7da18bc70c10>
variable[resp] assign[=] dictionary[[], []]
<ast.Try object at 0x7da18bc72ad0>
return[call[name[resp]][constant[names]]] | keyword[def] identifier[get_all_names_page] ( identifier[offset] , identifier[count] , identifier[include_expired] = keyword[False] , identifier[hostport] = keyword[None] , identifier[proxy] = keyword[None] ):
literal[string]
keyword[assert] identifier[proxy] keyword[or] identifier[hostport] , literal[string]
keyword[if] identifier[proxy] keyword[is] keyword[None] :
identifier[proxy] = identifier[connect_hostport] ( identifier[hostport] )
identifier[page_schema] ={
literal[string] : literal[string] ,
literal[string] :{
literal[string] :{
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : keyword[True]
},
},
},
literal[string] :[
literal[string] ,
],
}
identifier[schema] = identifier[json_response_schema] ( identifier[page_schema] )
keyword[try] :
keyword[assert] identifier[count] <= literal[int] , literal[string] . identifier[format] ( identifier[count] )
keyword[except] identifier[AssertionError] keyword[as] identifier[ae] :
keyword[if] identifier[BLOCKSTACK_DEBUG] :
identifier[log] . identifier[exception] ( identifier[ae] )
keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] }
identifier[resp] ={}
keyword[try] :
keyword[if] identifier[include_expired] :
identifier[resp] = identifier[proxy] . identifier[get_all_names_cumulative] ( identifier[offset] , identifier[count] )
keyword[else] :
identifier[resp] = identifier[proxy] . identifier[get_all_names] ( identifier[offset] , identifier[count] )
identifier[resp] = identifier[json_validate] ( identifier[schema] , identifier[resp] )
keyword[if] identifier[json_is_error] ( identifier[resp] ):
keyword[return] identifier[resp]
identifier[valid_names] =[]
keyword[for] identifier[n] keyword[in] identifier[resp] [ literal[string] ]:
keyword[if] keyword[not] identifier[is_name_valid] ( identifier[str] ( identifier[n] )):
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[str] ( identifier[n] )))
keyword[else] :
identifier[valid_names] . identifier[append] ( identifier[n] )
identifier[resp] [ literal[string] ]= identifier[valid_names]
keyword[except] identifier[ValidationError] keyword[as] identifier[ve] :
keyword[if] identifier[BLOCKSTACK_DEBUG] :
identifier[log] . identifier[exception] ( identifier[ve] )
identifier[resp] ={ literal[string] : literal[string] , literal[string] : literal[int] }
keyword[return] identifier[resp]
keyword[except] identifier[socket] . identifier[timeout] :
identifier[log] . identifier[error] ( literal[string] )
identifier[resp] ={ literal[string] : literal[string] , literal[string] : literal[int] }
keyword[return] identifier[resp]
keyword[except] identifier[socket] . identifier[error] keyword[as] identifier[se] :
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[se] . identifier[errno] ))
identifier[resp] ={ literal[string] : literal[string] , literal[string] : literal[int] }
keyword[return] identifier[resp]
keyword[except] identifier[Exception] keyword[as] identifier[ee] :
keyword[if] identifier[BLOCKSTACK_DEBUG] :
identifier[log] . identifier[exception] ( identifier[ee] )
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[ee] ))
identifier[resp] ={ literal[string] : literal[string] , literal[string] : literal[int] }
keyword[return] identifier[resp]
keyword[return] identifier[resp] [ literal[string] ] | def get_all_names_page(offset, count, include_expired=False, hostport=None, proxy=None):
"""
get a page of all the names
Returns the list of names on success
Returns {'error': ...} on error
"""
assert proxy or hostport, 'Need proxy or hostport'
if proxy is None:
proxy = connect_hostport(hostport) # depends on [control=['if'], data=['proxy']]
page_schema = {'type': 'object', 'properties': {'names': {'type': 'array', 'items': {'type': 'string', 'uniqueItems': True}}}, 'required': ['names']}
schema = json_response_schema(page_schema)
try:
assert count <= 100, 'Page too big: {}'.format(count) # depends on [control=['try'], data=[]]
except AssertionError as ae:
if BLOCKSTACK_DEBUG:
log.exception(ae) # depends on [control=['if'], data=[]]
return {'error': 'Invalid page', 'http_status': 400} # depends on [control=['except'], data=['ae']]
resp = {}
try:
if include_expired:
resp = proxy.get_all_names_cumulative(offset, count) # depends on [control=['if'], data=[]]
else:
resp = proxy.get_all_names(offset, count)
resp = json_validate(schema, resp)
if json_is_error(resp):
return resp # depends on [control=['if'], data=[]]
# must be valid names
valid_names = []
for n in resp['names']:
if not is_name_valid(str(n)):
log.error('Invalid name "{}"'.format(str(n))) # depends on [control=['if'], data=[]]
else:
valid_names.append(n) # depends on [control=['for'], data=['n']]
resp['names'] = valid_names # depends on [control=['try'], data=[]]
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve) # depends on [control=['if'], data=[]]
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp # depends on [control=['except'], data=['ve']]
except socket.timeout:
log.error('Connection timed out')
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp # depends on [control=['except'], data=[]]
except socket.error as se:
log.error('Connection error {}'.format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp # depends on [control=['except'], data=['se']]
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee) # depends on [control=['if'], data=[]]
log.error('Caught exception while connecting to Blockstack node: {}'.format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp # depends on [control=['except'], data=['ee']]
return resp['names'] |
def from_directory(input_dir):
"""
Read in a set of FEFF input files from a directory, which is
useful when existing FEFF input needs some adjustment.
"""
sub_d = {}
for fname, ftype in [("HEADER", Header), ("PARAMETERS", Tags)]:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
# Generation of FEFFDict set requires absorbing atom, need to search
# the index of absorption atom in the structure according to the
# distance matrix and shell species information contained in feff.inp
absorber_index = []
radius = None
feffinp = zpath(os.path.join(input_dir, 'feff.inp'))
if "RECIPROCAL" not in sub_d["parameters"]:
input_atoms = Atoms.cluster_from_file(feffinp)
shell_species = np.array([x.species_string for x in input_atoms])
# First row of distance matrix represents the distance from the absorber to
# the rest atoms
distance_matrix = input_atoms.distance_matrix[0, :]
# Get radius value
from math import ceil
radius = int(ceil(input_atoms.get_distance(input_atoms.index(input_atoms[0]),
input_atoms.index(input_atoms[-1]))))
for site_index, site in enumerate(sub_d['header'].struct):
if site.specie == input_atoms[0].specie:
site_atoms = Atoms(sub_d['header'].struct, absorbing_atom=site_index,
radius=radius)
site_distance = np.array(site_atoms.get_lines())[:, 5].astype(np.float64)
site_shell_species = np.array(site_atoms.get_lines())[:, 4]
shell_overlap = min(shell_species.shape[0], site_shell_species.shape[0])
if np.allclose(distance_matrix[:shell_overlap], site_distance[:shell_overlap]) and \
np.all(site_shell_species[:shell_overlap] == shell_species[:shell_overlap]):
absorber_index.append(site_index)
if "RECIPROCAL" in sub_d["parameters"]:
absorber_index = sub_d["parameters"]["TARGET"]
absorber_index[0] = int(absorber_index[0]) - 1
# Generate the input set
if 'XANES' in sub_d["parameters"]:
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPXANESSet.yaml"))
if radius is None:
radius = 10
return FEFFDictSet(absorber_index[0], sub_d['header'].struct, radius=radius,
config_dict=CONFIG, edge=sub_d["parameters"]["EDGE"],
nkpts=1000, user_tag_settings=sub_d["parameters"]) | def function[from_directory, parameter[input_dir]]:
constant[
Read in a set of FEFF input files from a directory, which is
useful when existing FEFF input needs some adjustment.
]
variable[sub_d] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c990d30>, <ast.Name object at 0x7da20c990cd0>]]] in starred[list[[<ast.Tuple object at 0x7da20c993a30>, <ast.Tuple object at 0x7da20c991d20>]]] begin[:]
variable[fullzpath] assign[=] call[name[zpath], parameter[call[name[os].path.join, parameter[name[input_dir], name[fname]]]]]
call[name[sub_d]][call[name[fname].lower, parameter[]]] assign[=] call[name[ftype].from_file, parameter[name[fullzpath]]]
variable[absorber_index] assign[=] list[[]]
variable[radius] assign[=] constant[None]
variable[feffinp] assign[=] call[name[zpath], parameter[call[name[os].path.join, parameter[name[input_dir], constant[feff.inp]]]]]
if compare[constant[RECIPROCAL] <ast.NotIn object at 0x7da2590d7190> call[name[sub_d]][constant[parameters]]] begin[:]
variable[input_atoms] assign[=] call[name[Atoms].cluster_from_file, parameter[name[feffinp]]]
variable[shell_species] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da204567070>]]
variable[distance_matrix] assign[=] call[name[input_atoms].distance_matrix][tuple[[<ast.Constant object at 0x7da2045670a0>, <ast.Slice object at 0x7da2045644c0>]]]
from relative_module[math] import module[ceil]
variable[radius] assign[=] call[name[int], parameter[call[name[ceil], parameter[call[name[input_atoms].get_distance, parameter[call[name[input_atoms].index, parameter[call[name[input_atoms]][constant[0]]]], call[name[input_atoms].index, parameter[call[name[input_atoms]][<ast.UnaryOp object at 0x7da204564220>]]]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da204564a90>, <ast.Name object at 0x7da204566aa0>]]] in starred[call[name[enumerate], parameter[call[name[sub_d]][constant[header]].struct]]] begin[:]
if compare[name[site].specie equal[==] call[name[input_atoms]][constant[0]].specie] begin[:]
variable[site_atoms] assign[=] call[name[Atoms], parameter[call[name[sub_d]][constant[header]].struct]]
variable[site_distance] assign[=] call[call[call[name[np].array, parameter[call[name[site_atoms].get_lines, parameter[]]]]][tuple[[<ast.Slice object at 0x7da2045667d0>, <ast.Constant object at 0x7da2045658d0>]]].astype, parameter[name[np].float64]]
variable[site_shell_species] assign[=] call[call[name[np].array, parameter[call[name[site_atoms].get_lines, parameter[]]]]][tuple[[<ast.Slice object at 0x7da2045646d0>, <ast.Constant object at 0x7da204565720>]]]
variable[shell_overlap] assign[=] call[name[min], parameter[call[name[shell_species].shape][constant[0]], call[name[site_shell_species].shape][constant[0]]]]
if <ast.BoolOp object at 0x7da204566d70> begin[:]
call[name[absorber_index].append, parameter[name[site_index]]]
if compare[constant[RECIPROCAL] in call[name[sub_d]][constant[parameters]]] begin[:]
variable[absorber_index] assign[=] call[call[name[sub_d]][constant[parameters]]][constant[TARGET]]
call[name[absorber_index]][constant[0]] assign[=] binary_operation[call[name[int], parameter[call[name[absorber_index]][constant[0]]]] - constant[1]]
if compare[constant[XANES] in call[name[sub_d]][constant[parameters]]] begin[:]
variable[CONFIG] assign[=] call[name[loadfn], parameter[call[name[os].path.join, parameter[name[MODULE_DIR], constant[MPXANESSet.yaml]]]]]
if compare[name[radius] is constant[None]] begin[:]
variable[radius] assign[=] constant[10]
return[call[name[FEFFDictSet], parameter[call[name[absorber_index]][constant[0]], call[name[sub_d]][constant[header]].struct]]] | keyword[def] identifier[from_directory] ( identifier[input_dir] ):
literal[string]
identifier[sub_d] ={}
keyword[for] identifier[fname] , identifier[ftype] keyword[in] [( literal[string] , identifier[Header] ),( literal[string] , identifier[Tags] )]:
identifier[fullzpath] = identifier[zpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir] , identifier[fname] ))
identifier[sub_d] [ identifier[fname] . identifier[lower] ()]= identifier[ftype] . identifier[from_file] ( identifier[fullzpath] )
identifier[absorber_index] =[]
identifier[radius] = keyword[None]
identifier[feffinp] = identifier[zpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[input_dir] , literal[string] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[sub_d] [ literal[string] ]:
identifier[input_atoms] = identifier[Atoms] . identifier[cluster_from_file] ( identifier[feffinp] )
identifier[shell_species] = identifier[np] . identifier[array] ([ identifier[x] . identifier[species_string] keyword[for] identifier[x] keyword[in] identifier[input_atoms] ])
identifier[distance_matrix] = identifier[input_atoms] . identifier[distance_matrix] [ literal[int] ,:]
keyword[from] identifier[math] keyword[import] identifier[ceil]
identifier[radius] = identifier[int] ( identifier[ceil] ( identifier[input_atoms] . identifier[get_distance] ( identifier[input_atoms] . identifier[index] ( identifier[input_atoms] [ literal[int] ]),
identifier[input_atoms] . identifier[index] ( identifier[input_atoms] [- literal[int] ]))))
keyword[for] identifier[site_index] , identifier[site] keyword[in] identifier[enumerate] ( identifier[sub_d] [ literal[string] ]. identifier[struct] ):
keyword[if] identifier[site] . identifier[specie] == identifier[input_atoms] [ literal[int] ]. identifier[specie] :
identifier[site_atoms] = identifier[Atoms] ( identifier[sub_d] [ literal[string] ]. identifier[struct] , identifier[absorbing_atom] = identifier[site_index] ,
identifier[radius] = identifier[radius] )
identifier[site_distance] = identifier[np] . identifier[array] ( identifier[site_atoms] . identifier[get_lines] ())[:, literal[int] ]. identifier[astype] ( identifier[np] . identifier[float64] )
identifier[site_shell_species] = identifier[np] . identifier[array] ( identifier[site_atoms] . identifier[get_lines] ())[:, literal[int] ]
identifier[shell_overlap] = identifier[min] ( identifier[shell_species] . identifier[shape] [ literal[int] ], identifier[site_shell_species] . identifier[shape] [ literal[int] ])
keyword[if] identifier[np] . identifier[allclose] ( identifier[distance_matrix] [: identifier[shell_overlap] ], identifier[site_distance] [: identifier[shell_overlap] ]) keyword[and] identifier[np] . identifier[all] ( identifier[site_shell_species] [: identifier[shell_overlap] ]== identifier[shell_species] [: identifier[shell_overlap] ]):
identifier[absorber_index] . identifier[append] ( identifier[site_index] )
keyword[if] literal[string] keyword[in] identifier[sub_d] [ literal[string] ]:
identifier[absorber_index] = identifier[sub_d] [ literal[string] ][ literal[string] ]
identifier[absorber_index] [ literal[int] ]= identifier[int] ( identifier[absorber_index] [ literal[int] ])- literal[int]
keyword[if] literal[string] keyword[in] identifier[sub_d] [ literal[string] ]:
identifier[CONFIG] = identifier[loadfn] ( identifier[os] . identifier[path] . identifier[join] ( identifier[MODULE_DIR] , literal[string] ))
keyword[if] identifier[radius] keyword[is] keyword[None] :
identifier[radius] = literal[int]
keyword[return] identifier[FEFFDictSet] ( identifier[absorber_index] [ literal[int] ], identifier[sub_d] [ literal[string] ]. identifier[struct] , identifier[radius] = identifier[radius] ,
identifier[config_dict] = identifier[CONFIG] , identifier[edge] = identifier[sub_d] [ literal[string] ][ literal[string] ],
identifier[nkpts] = literal[int] , identifier[user_tag_settings] = identifier[sub_d] [ literal[string] ]) | def from_directory(input_dir):
"""
Read in a set of FEFF input files from a directory, which is
useful when existing FEFF input needs some adjustment.
"""
sub_d = {}
for (fname, ftype) in [('HEADER', Header), ('PARAMETERS', Tags)]:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath) # depends on [control=['for'], data=[]]
# Generation of FEFFDict set requires absorbing atom, need to search
# the index of absorption atom in the structure according to the
# distance matrix and shell species information contained in feff.inp
absorber_index = []
radius = None
feffinp = zpath(os.path.join(input_dir, 'feff.inp'))
if 'RECIPROCAL' not in sub_d['parameters']:
input_atoms = Atoms.cluster_from_file(feffinp)
shell_species = np.array([x.species_string for x in input_atoms])
# First row of distance matrix represents the distance from the absorber to
# the rest atoms
distance_matrix = input_atoms.distance_matrix[0, :]
# Get radius value
from math import ceil
radius = int(ceil(input_atoms.get_distance(input_atoms.index(input_atoms[0]), input_atoms.index(input_atoms[-1]))))
for (site_index, site) in enumerate(sub_d['header'].struct):
if site.specie == input_atoms[0].specie:
site_atoms = Atoms(sub_d['header'].struct, absorbing_atom=site_index, radius=radius)
site_distance = np.array(site_atoms.get_lines())[:, 5].astype(np.float64)
site_shell_species = np.array(site_atoms.get_lines())[:, 4]
shell_overlap = min(shell_species.shape[0], site_shell_species.shape[0])
if np.allclose(distance_matrix[:shell_overlap], site_distance[:shell_overlap]) and np.all(site_shell_species[:shell_overlap] == shell_species[:shell_overlap]):
absorber_index.append(site_index) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if 'RECIPROCAL' in sub_d['parameters']:
absorber_index = sub_d['parameters']['TARGET']
absorber_index[0] = int(absorber_index[0]) - 1 # depends on [control=['if'], data=[]]
# Generate the input set
if 'XANES' in sub_d['parameters']:
CONFIG = loadfn(os.path.join(MODULE_DIR, 'MPXANESSet.yaml'))
if radius is None:
radius = 10 # depends on [control=['if'], data=['radius']]
return FEFFDictSet(absorber_index[0], sub_d['header'].struct, radius=radius, config_dict=CONFIG, edge=sub_d['parameters']['EDGE'], nkpts=1000, user_tag_settings=sub_d['parameters']) # depends on [control=['if'], data=[]] |
def _get_cn_cert():
"""Get the public TLS/SSL X.509 certificate from the root CN of the DataONE
environment. The certificate is used for validating the signature of the JWTs.
If certificate retrieval fails, a new attempt to retrieve the certificate
is performed after the cache expires (settings.CACHES.default.TIMEOUT).
If successful, returns a cryptography.Certificate().
"""
try:
cert_obj = django.core.cache.cache.cn_cert_obj
d1_common.cert.x509.log_cert_info(
logging.debug, 'Using cached CN cert for JWT validation', cert_obj
)
return cert_obj
except AttributeError:
cn_cert_obj = _download_and_decode_cn_cert()
django.core.cache.cache.cn_cert_obj = cn_cert_obj
return cn_cert_obj | def function[_get_cn_cert, parameter[]]:
constant[Get the public TLS/SSL X.509 certificate from the root CN of the DataONE
environment. The certificate is used for validating the signature of the JWTs.
If certificate retrieval fails, a new attempt to retrieve the certificate
is performed after the cache expires (settings.CACHES.default.TIMEOUT).
If successful, returns a cryptography.Certificate().
]
<ast.Try object at 0x7da1b1a2dc60> | keyword[def] identifier[_get_cn_cert] ():
literal[string]
keyword[try] :
identifier[cert_obj] = identifier[django] . identifier[core] . identifier[cache] . identifier[cache] . identifier[cn_cert_obj]
identifier[d1_common] . identifier[cert] . identifier[x509] . identifier[log_cert_info] (
identifier[logging] . identifier[debug] , literal[string] , identifier[cert_obj]
)
keyword[return] identifier[cert_obj]
keyword[except] identifier[AttributeError] :
identifier[cn_cert_obj] = identifier[_download_and_decode_cn_cert] ()
identifier[django] . identifier[core] . identifier[cache] . identifier[cache] . identifier[cn_cert_obj] = identifier[cn_cert_obj]
keyword[return] identifier[cn_cert_obj] | def _get_cn_cert():
"""Get the public TLS/SSL X.509 certificate from the root CN of the DataONE
environment. The certificate is used for validating the signature of the JWTs.
If certificate retrieval fails, a new attempt to retrieve the certificate
is performed after the cache expires (settings.CACHES.default.TIMEOUT).
If successful, returns a cryptography.Certificate().
"""
try:
cert_obj = django.core.cache.cache.cn_cert_obj
d1_common.cert.x509.log_cert_info(logging.debug, 'Using cached CN cert for JWT validation', cert_obj)
return cert_obj # depends on [control=['try'], data=[]]
except AttributeError:
cn_cert_obj = _download_and_decode_cn_cert()
django.core.cache.cache.cn_cert_obj = cn_cert_obj
return cn_cert_obj # depends on [control=['except'], data=[]] |
def _head_temp_file(self, temp_file, num_lines):
""" Returns a list of the first num_lines lines from a temp file. """
if not isinstance(num_lines, int):
raise DagobahError('num_lines must be an integer')
temp_file.seek(0)
result, curr_line = [], 0
for line in temp_file:
curr_line += 1
result.append(line.strip())
if curr_line >= num_lines:
break
return result | def function[_head_temp_file, parameter[self, temp_file, num_lines]]:
constant[ Returns a list of the first num_lines lines from a temp file. ]
if <ast.UnaryOp object at 0x7da1b0be1ed0> begin[:]
<ast.Raise object at 0x7da1b0be0df0>
call[name[temp_file].seek, parameter[constant[0]]]
<ast.Tuple object at 0x7da1b0be12a0> assign[=] tuple[[<ast.List object at 0x7da1b0be0f70>, <ast.Constant object at 0x7da1b0be22c0>]]
for taget[name[line]] in starred[name[temp_file]] begin[:]
<ast.AugAssign object at 0x7da1b0be2e90>
call[name[result].append, parameter[call[name[line].strip, parameter[]]]]
if compare[name[curr_line] greater_or_equal[>=] name[num_lines]] begin[:]
break
return[name[result]] | keyword[def] identifier[_head_temp_file] ( identifier[self] , identifier[temp_file] , identifier[num_lines] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[num_lines] , identifier[int] ):
keyword[raise] identifier[DagobahError] ( literal[string] )
identifier[temp_file] . identifier[seek] ( literal[int] )
identifier[result] , identifier[curr_line] =[], literal[int]
keyword[for] identifier[line] keyword[in] identifier[temp_file] :
identifier[curr_line] += literal[int]
identifier[result] . identifier[append] ( identifier[line] . identifier[strip] ())
keyword[if] identifier[curr_line] >= identifier[num_lines] :
keyword[break]
keyword[return] identifier[result] | def _head_temp_file(self, temp_file, num_lines):
""" Returns a list of the first num_lines lines from a temp file. """
if not isinstance(num_lines, int):
raise DagobahError('num_lines must be an integer') # depends on [control=['if'], data=[]]
temp_file.seek(0)
(result, curr_line) = ([], 0)
for line in temp_file:
curr_line += 1
result.append(line.strip())
if curr_line >= num_lines:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return result |
def get_lyrics_letssingit(song_name):
    '''
    Scrape the lyrics of a song (Spotify does not provide lyrics).

    :param song_name: song title used as the search query.
    :return: lyrics text, or an empty string when no result is found,
        the page layout is unexpected, or the fetch fails.
    '''
    lyrics = ""
    url = "http://search.letssingit.com/cgi-exe/am.cgi?a=search&artist_id=&l=archive&s=" + \
        quote(song_name.encode('utf-8'))
    html = urlopen(url).read()
    soup = BeautifulSoup(html, "html.parser")
    # The first 'high_profile' anchor links to the song's lyrics page;
    # it is None when the search returned nothing.
    link = soup.find('a', {'class': 'high_profile'})
    try:
        link = link.get('href')  # AttributeError when no result was found
        page = urlopen(link).read()
        soup = BeautifulSoup(page, "html.parser")
        try:
            lyrics = soup.find('div', {'id': 'lyrics'}).text
            # Drop the first three characters the site prepends to the text.
            lyrics = lyrics[3:]
        except AttributeError:
            lyrics = ""
    # Narrowed from a bare 'except:' which also swallowed SystemExit and
    # KeyboardInterrupt; any network/parse failure still yields "".
    except Exception:
        lyrics = ""
    return lyrics
constant[
Scrapes the lyrics of a song since spotify does not provide lyrics
takes song title as arguement
]
variable[lyrics] assign[=] constant[]
variable[url] assign[=] binary_operation[constant[http://search.letssingit.com/cgi-exe/am.cgi?a=search&artist_id=&l=archive&s=] + call[name[quote], parameter[call[name[song_name].encode, parameter[constant[utf-8]]]]]]
variable[html] assign[=] call[call[name[urlopen], parameter[name[url]]].read, parameter[]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[html], constant[html.parser]]]
variable[link] assign[=] call[name[soup].find, parameter[constant[a], dictionary[[<ast.Constant object at 0x7da18f58f490>], [<ast.Constant object at 0x7da18f58d630>]]]]
<ast.Try object at 0x7da18f58e2f0>
return[name[lyrics]] | keyword[def] identifier[get_lyrics_letssingit] ( identifier[song_name] ):
literal[string]
identifier[lyrics] = literal[string]
identifier[url] = literal[string] + identifier[quote] ( identifier[song_name] . identifier[encode] ( literal[string] ))
identifier[html] = identifier[urlopen] ( identifier[url] ). identifier[read] ()
identifier[soup] = identifier[BeautifulSoup] ( identifier[html] , literal[string] )
identifier[link] = identifier[soup] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
keyword[try] :
identifier[link] = identifier[link] . identifier[get] ( literal[string] )
identifier[link] = identifier[urlopen] ( identifier[link] ). identifier[read] ()
identifier[soup] = identifier[BeautifulSoup] ( identifier[link] , literal[string] )
keyword[try] :
identifier[lyrics] = identifier[soup] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[text]
identifier[lyrics] = identifier[lyrics] [ literal[int] :]
keyword[except] identifier[AttributeError] :
identifier[lyrics] = literal[string]
keyword[except] :
identifier[lyrics] = literal[string]
keyword[return] identifier[lyrics] | def get_lyrics_letssingit(song_name):
"""
Scrapes the lyrics of a song since spotify does not provide lyrics
takes song title as arguement
"""
lyrics = ''
url = 'http://search.letssingit.com/cgi-exe/am.cgi?a=search&artist_id=&l=archive&s=' + quote(song_name.encode('utf-8'))
html = urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
link = soup.find('a', {'class': 'high_profile'})
try:
link = link.get('href')
link = urlopen(link).read()
soup = BeautifulSoup(link, 'html.parser')
try:
lyrics = soup.find('div', {'id': 'lyrics'}).text
lyrics = lyrics[3:] # depends on [control=['try'], data=[]]
except AttributeError:
lyrics = '' # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except:
lyrics = '' # depends on [control=['except'], data=[]]
return lyrics |
def updateDataset(self):
    """Updates the dataset.

    Refreshes cached dataset state by delegating to three private
    helpers in sequence: re-reads the underlying data item, recounts
    the number of records, and re-parses the column descriptions.
    """
    self.__updateDataItem()
    self.__countNbRecords()
    # Cache the freshly parsed column descriptions for later lookups.
    self.__columns = self.__parseColumns()
constant[Updates the dataset.]
call[name[self].__updateDataItem, parameter[]]
call[name[self].__countNbRecords, parameter[]]
name[self].__columns assign[=] call[name[self].__parseColumns, parameter[]] | keyword[def] identifier[updateDataset] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__updateDataItem] ()
identifier[self] . identifier[__countNbRecords] ()
identifier[self] . identifier[__columns] = identifier[self] . identifier[__parseColumns] () | def updateDataset(self):
"""Updates the dataset."""
self.__updateDataItem()
self.__countNbRecords()
self.__columns = self.__parseColumns() |
def read_wavefront(fname_obj):
    """Return mesh dictionaries, each joined with its material dictionary,
    from a wavefront (.obj and/or .mtl) file.

    :param fname_obj: path to the .obj file; a referenced .mtl file is
        resolved relative to the same directory.
    :return: dict of geometries from ``read_objfile``; when an ``mtllib``
        directive is present, each geometry gains a ``'material'`` entry
        looked up by its ``'usemtl'`` name.
    """
    fname_mtl = ''
    geoms = read_objfile(fname_obj)
    # Scan the .obj file for the first 'mtllib' directive naming the
    # material library.  'with' guarantees the handle is closed -- the
    # original 'for line in open(...)' leaked the file object.
    with open(fname_obj) as obj_file:
        for line in obj_file:
            if not line:
                continue
            split_line = line.strip().split(' ', 1)
            if len(split_line) < 2:
                continue
            prefix, data = split_line
            if 'mtllib' in prefix:
                fname_mtl = data
                break
    if fname_mtl:
        materials = read_mtlfile(path.join(path.dirname(fname_obj), fname_mtl))
        for geom in geoms.values():
            geom['material'] = materials[geom['usemtl']]
    return geoms
constant[Returns mesh dictionary along with their material dictionary from a wavefront (.obj and/or .mtl) file.]
variable[fname_mtl] assign[=] constant[]
variable[geoms] assign[=] call[name[read_objfile], parameter[name[fname_obj]]]
for taget[name[line]] in starred[call[name[open], parameter[name[fname_obj]]]] begin[:]
if name[line] begin[:]
variable[split_line] assign[=] call[call[name[line].strip, parameter[]].split, parameter[constant[ ], constant[1]]]
if compare[call[name[len], parameter[name[split_line]]] less[<] constant[2]] begin[:]
continue
<ast.Tuple object at 0x7da2045673a0> assign[=] tuple[[<ast.Subscript object at 0x7da204564dc0>, <ast.Subscript object at 0x7da204564b80>]]
if compare[constant[mtllib] in name[prefix]] begin[:]
variable[fname_mtl] assign[=] name[data]
break
if name[fname_mtl] begin[:]
variable[materials] assign[=] call[name[read_mtlfile], parameter[call[name[path].join, parameter[call[name[path].dirname, parameter[name[fname_obj]]], name[fname_mtl]]]]]
for taget[name[geom]] in starred[call[name[geoms].values, parameter[]]] begin[:]
call[name[geom]][constant[material]] assign[=] call[name[materials]][call[name[geom]][constant[usemtl]]]
return[name[geoms]] | keyword[def] identifier[read_wavefront] ( identifier[fname_obj] ):
literal[string]
identifier[fname_mtl] = literal[string]
identifier[geoms] = identifier[read_objfile] ( identifier[fname_obj] )
keyword[for] identifier[line] keyword[in] identifier[open] ( identifier[fname_obj] ):
keyword[if] identifier[line] :
identifier[split_line] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[len] ( identifier[split_line] )< literal[int] :
keyword[continue]
identifier[prefix] , identifier[data] = identifier[split_line] [ literal[int] ], identifier[split_line] [ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[prefix] :
identifier[fname_mtl] = identifier[data]
keyword[break]
keyword[if] identifier[fname_mtl] :
identifier[materials] = identifier[read_mtlfile] ( identifier[path] . identifier[join] ( identifier[path] . identifier[dirname] ( identifier[fname_obj] ), identifier[fname_mtl] ))
keyword[for] identifier[geom] keyword[in] identifier[geoms] . identifier[values] ():
identifier[geom] [ literal[string] ]= identifier[materials] [ identifier[geom] [ literal[string] ]]
keyword[return] identifier[geoms] | def read_wavefront(fname_obj):
"""Returns mesh dictionary along with their material dictionary from a wavefront (.obj and/or .mtl) file."""
fname_mtl = ''
geoms = read_objfile(fname_obj)
for line in open(fname_obj):
if line:
split_line = line.strip().split(' ', 1)
if len(split_line) < 2:
continue # depends on [control=['if'], data=[]]
(prefix, data) = (split_line[0], split_line[1])
if 'mtllib' in prefix:
fname_mtl = data
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if fname_mtl:
materials = read_mtlfile(path.join(path.dirname(fname_obj), fname_mtl))
for geom in geoms.values():
geom['material'] = materials[geom['usemtl']] # depends on [control=['for'], data=['geom']] # depends on [control=['if'], data=[]]
return geoms |
def gets(self, conn, key, default=None):
    """Fetch one value together with its cas token from the server.

    :param key: ``bytes``, key of the item to fetch
    :param default: returned in place of the value when the key is absent
    :return: ``(value, cas_token)`` tuple; the token is ``None`` when missing
    """
    fetched, tokens = yield from self._multi_get(
        conn, key, with_cas=True)
    value = fetched.get(key, default)
    cas_token = tokens.get(key)
    return value, cas_token
constant[Gets a single value from the server together with the cas token.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, ``bytes tuple with the value and the cas
]
<ast.Tuple object at 0x7da1b0d8c220> assign[=] <ast.YieldFrom object at 0x7da1b0d8e860>
return[tuple[[<ast.Call object at 0x7da1b0d8dab0>, <ast.Call object at 0x7da1b0d8d060>]]] | keyword[def] identifier[gets] ( identifier[self] , identifier[conn] , identifier[key] , identifier[default] = keyword[None] ):
literal[string]
identifier[values] , identifier[cas_tokens] = keyword[yield] keyword[from] identifier[self] . identifier[_multi_get] (
identifier[conn] , identifier[key] , identifier[with_cas] = keyword[True] )
keyword[return] identifier[values] . identifier[get] ( identifier[key] , identifier[default] ), identifier[cas_tokens] . identifier[get] ( identifier[key] ) | def gets(self, conn, key, default=None):
"""Gets a single value from the server together with the cas token.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, ``bytes tuple with the value and the cas
"""
(values, cas_tokens) = (yield from self._multi_get(conn, key, with_cas=True))
return (values.get(key, default), cas_tokens.get(key)) |
def _parse(self, stream, context, path):
    """Parse until the end of objects data.

    Scans the raw bytes for a marker belonging to the *next* player
    structure (or, for the last player, the scenario header), then
    backtracks from that marker to the true end of the objects data
    and seeks the stream there.

    :param stream: seekable byte stream positioned at the objects data.
    :param context: construct parsing context; the player count is read
        from the replay header via ``context._._._``.
    :param path: construct path (unused directly, required by the API).
    :return: absolute stream offset of the end of the objects data.
    """
    num_players = context._._._.replay.num_players
    start = stream.tell()
    # Have to read everything to be able to use find()
    read_bytes = stream.read()
    # Try to find the first marker, a portion of the next player structure.
    # Two byte variants; presumably UserPatch 1.4 vs 1.5 layouts -- TODO confirm.
    marker_up14 = read_bytes.find(b"\x16\xc6\x00\x00\x00\x21")
    marker_up15 = read_bytes.find(b"\x16\xf0\x00\x00\x00\x21")
    marker = -1
    # Accept a variant only when exactly one of the two matched; otherwise
    # marker stays -1 and we fall through to the last-player branch below.
    if marker_up14 > 0 and marker_up15 < 0:
        marker = marker_up14
    elif marker_up15 > 0 and marker_up14 < 0:
        marker = marker_up15
    # If it exists, we're not on the last player yet
    if marker > 0:
        # Backtrack through the player name: walk back one byte at a time
        # until the little-endian 16-bit length prefix just before the
        # marker equals the number of bytes walked.
        count = 0
        while struct.unpack("<H", read_bytes[marker-2:marker])[0] != count:
            marker -= 1
            count += 1
        # Backtrack through the rest of the next player structure
        backtrack = 43 + num_players
    # Otherwise, this is the last player
    else:
        # Search for the scenario header
        marker = read_bytes.find(b"\xf6\x28\x9c\x3f")
        # Backtrack through the achievements and initial structure footer
        backtrack = ((1817 * (num_players - 1)) + 4 + 19)
    # Seek to the position we found
    end = start + marker - backtrack
    stream.seek(end)
    return end
constant[Parse until the end of objects data.]
variable[num_players] assign[=] name[context]._._._.replay.num_players
variable[start] assign[=] call[name[stream].tell, parameter[]]
variable[read_bytes] assign[=] call[name[stream].read, parameter[]]
variable[marker_up14] assign[=] call[name[read_bytes].find, parameter[constant[b'\x16\xc6\x00\x00\x00!']]]
variable[marker_up15] assign[=] call[name[read_bytes].find, parameter[constant[b'\x16\xf0\x00\x00\x00!']]]
variable[marker] assign[=] <ast.UnaryOp object at 0x7da1b2597e20>
if <ast.BoolOp object at 0x7da1b2594be0> begin[:]
variable[marker] assign[=] name[marker_up14]
if compare[name[marker] greater[>] constant[0]] begin[:]
variable[count] assign[=] constant[0]
while compare[call[call[name[struct].unpack, parameter[constant[<H], call[name[read_bytes]][<ast.Slice object at 0x7da1b2594f10>]]]][constant[0]] not_equal[!=] name[count]] begin[:]
<ast.AugAssign object at 0x7da1b2595f60>
<ast.AugAssign object at 0x7da1b25960e0>
variable[backtrack] assign[=] binary_operation[constant[43] + name[num_players]]
variable[end] assign[=] binary_operation[binary_operation[name[start] + name[marker]] - name[backtrack]]
call[name[stream].seek, parameter[name[end]]]
return[name[end]] | keyword[def] identifier[_parse] ( identifier[self] , identifier[stream] , identifier[context] , identifier[path] ):
literal[string]
identifier[num_players] = identifier[context] . identifier[_] . identifier[_] . identifier[_] . identifier[replay] . identifier[num_players]
identifier[start] = identifier[stream] . identifier[tell] ()
identifier[read_bytes] = identifier[stream] . identifier[read] ()
identifier[marker_up14] = identifier[read_bytes] . identifier[find] ( literal[string] )
identifier[marker_up15] = identifier[read_bytes] . identifier[find] ( literal[string] )
identifier[marker] =- literal[int]
keyword[if] identifier[marker_up14] > literal[int] keyword[and] identifier[marker_up15] < literal[int] :
identifier[marker] = identifier[marker_up14]
keyword[elif] identifier[marker_up15] > literal[int] keyword[and] identifier[marker_up14] < literal[int] :
identifier[marker] = identifier[marker_up15]
keyword[if] identifier[marker] > literal[int] :
identifier[count] = literal[int]
keyword[while] identifier[struct] . identifier[unpack] ( literal[string] , identifier[read_bytes] [ identifier[marker] - literal[int] : identifier[marker] ])[ literal[int] ]!= identifier[count] :
identifier[marker] -= literal[int]
identifier[count] += literal[int]
identifier[backtrack] = literal[int] + identifier[num_players]
keyword[else] :
identifier[marker] = identifier[read_bytes] . identifier[find] ( literal[string] )
identifier[backtrack] =(( literal[int] *( identifier[num_players] - literal[int] ))+ literal[int] + literal[int] )
identifier[end] = identifier[start] + identifier[marker] - identifier[backtrack]
identifier[stream] . identifier[seek] ( identifier[end] )
keyword[return] identifier[end] | def _parse(self, stream, context, path):
"""Parse until the end of objects data."""
num_players = context._._._.replay.num_players
start = stream.tell()
# Have to read everything to be able to use find()
read_bytes = stream.read()
# Try to find the first marker, a portion of the next player structure
marker_up14 = read_bytes.find(b'\x16\xc6\x00\x00\x00!')
marker_up15 = read_bytes.find(b'\x16\xf0\x00\x00\x00!')
marker = -1
if marker_up14 > 0 and marker_up15 < 0:
marker = marker_up14 # depends on [control=['if'], data=[]]
elif marker_up15 > 0 and marker_up14 < 0:
marker = marker_up15 # depends on [control=['if'], data=[]]
# If it exists, we're not on the last player yet
if marker > 0:
# Backtrack through the player name
count = 0
while struct.unpack('<H', read_bytes[marker - 2:marker])[0] != count:
marker -= 1
count += 1 # depends on [control=['while'], data=['count']]
# Backtrack through the rest of the next player structure
backtrack = 43 + num_players # depends on [control=['if'], data=['marker']]
else:
# Otherwise, this is the last player
# Search for the scenario header
marker = read_bytes.find(b'\xf6(\x9c?')
# Backtrack through the achievements and initial structure footer
backtrack = 1817 * (num_players - 1) + 4 + 19
# Seek to the position we found
end = start + marker - backtrack
stream.seek(end)
return end |
def create_connections(self, connection_map):
    '''Create agent connections from a given connection map.

    :param dict connection_map:
        A map of connections to be created. Dictionary where keys are
        agent addresses and values are lists of (addr, attitude)-tuples
        suitable for
        :meth:`~creamas.core.agent.CreativeAgent.add_connections`.
        Only connections for agents in this environment are made.
    '''
    return [
        agent.add_connections(connection_map[agent.addr])
        for agent in self.get_agents(addr=False)
        if agent.addr in connection_map
    ]
constant[Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made.
]
variable[agents] assign[=] call[name[self].get_agents, parameter[]]
variable[rets] assign[=] list[[]]
for taget[name[a]] in starred[name[agents]] begin[:]
if compare[name[a].addr in name[connection_map]] begin[:]
variable[r] assign[=] call[name[a].add_connections, parameter[call[name[connection_map]][name[a].addr]]]
call[name[rets].append, parameter[name[r]]]
return[name[rets]] | keyword[def] identifier[create_connections] ( identifier[self] , identifier[connection_map] ):
literal[string]
identifier[agents] = identifier[self] . identifier[get_agents] ( identifier[addr] = keyword[False] )
identifier[rets] =[]
keyword[for] identifier[a] keyword[in] identifier[agents] :
keyword[if] identifier[a] . identifier[addr] keyword[in] identifier[connection_map] :
identifier[r] = identifier[a] . identifier[add_connections] ( identifier[connection_map] [ identifier[a] . identifier[addr] ])
identifier[rets] . identifier[append] ( identifier[r] )
keyword[return] identifier[rets] | def create_connections(self, connection_map):
"""Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made.
"""
agents = self.get_agents(addr=False)
rets = []
for a in agents:
if a.addr in connection_map:
r = a.add_connections(connection_map[a.addr])
rets.append(r) # depends on [control=['if'], data=['connection_map']] # depends on [control=['for'], data=['a']]
return rets |
def new_line(self, tokens, line_end, line_start):
    """a new line has been encountered, process it if necessary"""
    if _last_token_on_line_is(tokens, line_end, ";"):
        self.add_message("unnecessary-semicolon", line=tokens.start_line(line_end))

    lineno = tokens.start_line(line_start)
    text = tokens.line(line_start)
    if tokens.type(line_start) not in _JUNK_TOKENS:
        # Keep only the first physical line of the logical line.
        first_physical, _, _ = text.partition("\n")
        self._lines[lineno] = first_physical
    self.check_lines(text, lineno)
constant[a new line has been encountered, process it if necessary]
if call[name[_last_token_on_line_is], parameter[name[tokens], name[line_end], constant[;]]] begin[:]
call[name[self].add_message, parameter[constant[unnecessary-semicolon]]]
variable[line_num] assign[=] call[name[tokens].start_line, parameter[name[line_start]]]
variable[line] assign[=] call[name[tokens].line, parameter[name[line_start]]]
if compare[call[name[tokens].type, parameter[name[line_start]]] <ast.NotIn object at 0x7da2590d7190> name[_JUNK_TOKENS]] begin[:]
call[name[self]._lines][name[line_num]] assign[=] call[call[name[line].split, parameter[constant[
]]]][constant[0]]
call[name[self].check_lines, parameter[name[line], name[line_num]]] | keyword[def] identifier[new_line] ( identifier[self] , identifier[tokens] , identifier[line_end] , identifier[line_start] ):
literal[string]
keyword[if] identifier[_last_token_on_line_is] ( identifier[tokens] , identifier[line_end] , literal[string] ):
identifier[self] . identifier[add_message] ( literal[string] , identifier[line] = identifier[tokens] . identifier[start_line] ( identifier[line_end] ))
identifier[line_num] = identifier[tokens] . identifier[start_line] ( identifier[line_start] )
identifier[line] = identifier[tokens] . identifier[line] ( identifier[line_start] )
keyword[if] identifier[tokens] . identifier[type] ( identifier[line_start] ) keyword[not] keyword[in] identifier[_JUNK_TOKENS] :
identifier[self] . identifier[_lines] [ identifier[line_num] ]= identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[self] . identifier[check_lines] ( identifier[line] , identifier[line_num] ) | def new_line(self, tokens, line_end, line_start):
"""a new line has been encountered, process it if necessary"""
if _last_token_on_line_is(tokens, line_end, ';'):
self.add_message('unnecessary-semicolon', line=tokens.start_line(line_end)) # depends on [control=['if'], data=[]]
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split('\n')[0] # depends on [control=['if'], data=[]]
self.check_lines(line, line_num) |
def cmd_rally(self, args):
    '''rally point commands

    Dispatches on args[0]:
    add / move / clear / remove / list / load / save / alt / land.
    Missing or unknown subcommands print the usage text.
    '''
    #TODO: add_land arg
    if len(args) < 1:
        self.print_usage()
        return
    elif args[0] == "add":
        self.cmd_rally_add(args[1:])
    elif args[0] == "move":
        self.cmd_rally_move(args[1:])
    elif args[0] == "clear":
        # Drop local points and zero RALLY_TOTAL on the vehicle.
        self.rallyloader.clear()
        self.mav_param.mavset(self.master,'RALLY_TOTAL',0,3)
    elif args[0] == "remove":
        # Require a prior 'rally list' so indices refer to a known set.
        if not self.have_list:
            print("Please list rally points first")
            return
        if (len(args) < 2):
            print("Usage: rally remove RALLYNUM")
            return
        self.rallyloader.remove(int(args[1]))
        self.send_rally_points()
    elif args[0] == "list":
        self.list_rally_points()
        self.have_list = True
    elif args[0] == "load":
        if (len(args) < 2):
            print("Usage: rally load filename")
            return
        try:
            self.rallyloader.load(args[1])
        except Exception as msg:
            print("Unable to load %s - %s" % (args[1], msg))
            return
        # Upload the freshly loaded points to the vehicle.
        self.send_rally_points()
        self.have_list = True
        print("Loaded %u rally points from %s" % (self.rallyloader.rally_count(), args[1]))
    elif args[0] == "save":
        if (len(args) < 2):
            print("Usage: rally save filename")
            return
        self.rallyloader.save(args[1])
        print("Saved rally file %s" % args[1])
    elif args[0] == "alt":
        self.cmd_rally_alt(args[1:])
    elif args[0] == "land":
        if (len(args) >= 2 and args[1] == "abort"):
            # Arm an abort request; break altitude defaults to the
            # configured rally_breakalt unless overridden by args[2].
            self.abort_ack_received = False
            self.abort_first_send_time = 0
            self.abort_alt = self.settings.rally_breakalt
            if (len(args) >= 3):
                self.abort_alt = int(args[2])
        else:
            # Command an immediate rally landing via MAVLink.
            self.master.mav.command_long_send(self.settings.target_system,
                                              self.settings.target_component,
                                              mavutil.mavlink.MAV_CMD_DO_RALLY_LAND,
                                              0, 0, 0, 0, 0, 0, 0, 0)
    else:
        self.print_usage()
constant[rally point commands]
if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:]
call[name[self].print_usage, parameter[]]
return[None] | keyword[def] identifier[cmd_rally] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[self] . identifier[print_usage] ()
keyword[return]
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[cmd_rally_add] ( identifier[args] [ literal[int] :])
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[cmd_rally_move] ( identifier[args] [ literal[int] :])
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[rallyloader] . identifier[clear] ()
identifier[self] . identifier[mav_param] . identifier[mavset] ( identifier[self] . identifier[master] , literal[string] , literal[int] , literal[int] )
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[have_list] :
identifier[print] ( literal[string] )
keyword[return]
keyword[if] ( identifier[len] ( identifier[args] )< literal[int] ):
identifier[print] ( literal[string] )
keyword[return]
identifier[self] . identifier[rallyloader] . identifier[remove] ( identifier[int] ( identifier[args] [ literal[int] ]))
identifier[self] . identifier[send_rally_points] ()
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[list_rally_points] ()
identifier[self] . identifier[have_list] = keyword[True]
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
keyword[if] ( identifier[len] ( identifier[args] )< literal[int] ):
identifier[print] ( literal[string] )
keyword[return]
keyword[try] :
identifier[self] . identifier[rallyloader] . identifier[load] ( identifier[args] [ literal[int] ])
keyword[except] identifier[Exception] keyword[as] identifier[msg] :
identifier[print] ( literal[string] %( identifier[args] [ literal[int] ], identifier[msg] ))
keyword[return]
identifier[self] . identifier[send_rally_points] ()
identifier[self] . identifier[have_list] = keyword[True]
identifier[print] ( literal[string] %( identifier[self] . identifier[rallyloader] . identifier[rally_count] (), identifier[args] [ literal[int] ]))
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
keyword[if] ( identifier[len] ( identifier[args] )< literal[int] ):
identifier[print] ( literal[string] )
keyword[return]
identifier[self] . identifier[rallyloader] . identifier[save] ( identifier[args] [ literal[int] ])
identifier[print] ( literal[string] % identifier[args] [ literal[int] ])
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
identifier[self] . identifier[cmd_rally_alt] ( identifier[args] [ literal[int] :])
keyword[elif] identifier[args] [ literal[int] ]== literal[string] :
keyword[if] ( identifier[len] ( identifier[args] )>= literal[int] keyword[and] identifier[args] [ literal[int] ]== literal[string] ):
identifier[self] . identifier[abort_ack_received] = keyword[False]
identifier[self] . identifier[abort_first_send_time] = literal[int]
identifier[self] . identifier[abort_alt] = identifier[self] . identifier[settings] . identifier[rally_breakalt]
keyword[if] ( identifier[len] ( identifier[args] )>= literal[int] ):
identifier[self] . identifier[abort_alt] = identifier[int] ( identifier[args] [ literal[int] ])
keyword[else] :
identifier[self] . identifier[master] . identifier[mav] . identifier[command_long_send] ( identifier[self] . identifier[settings] . identifier[target_system] ,
identifier[self] . identifier[settings] . identifier[target_component] ,
identifier[mavutil] . identifier[mavlink] . identifier[MAV_CMD_DO_RALLY_LAND] ,
literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] )
keyword[else] :
identifier[self] . identifier[print_usage] () | def cmd_rally(self, args):
"""rally point commands"""
#TODO: add_land arg
if len(args) < 1:
self.print_usage()
return # depends on [control=['if'], data=[]]
elif args[0] == 'add':
self.cmd_rally_add(args[1:]) # depends on [control=['if'], data=[]]
elif args[0] == 'move':
self.cmd_rally_move(args[1:]) # depends on [control=['if'], data=[]]
elif args[0] == 'clear':
self.rallyloader.clear()
self.mav_param.mavset(self.master, 'RALLY_TOTAL', 0, 3) # depends on [control=['if'], data=[]]
elif args[0] == 'remove':
if not self.have_list:
print('Please list rally points first')
return # depends on [control=['if'], data=[]]
if len(args) < 2:
print('Usage: rally remove RALLYNUM')
return # depends on [control=['if'], data=[]]
self.rallyloader.remove(int(args[1]))
self.send_rally_points() # depends on [control=['if'], data=[]]
elif args[0] == 'list':
self.list_rally_points()
self.have_list = True # depends on [control=['if'], data=[]]
elif args[0] == 'load':
if len(args) < 2:
print('Usage: rally load filename')
return # depends on [control=['if'], data=[]]
try:
self.rallyloader.load(args[1]) # depends on [control=['try'], data=[]]
except Exception as msg:
print('Unable to load %s - %s' % (args[1], msg))
return # depends on [control=['except'], data=['msg']]
self.send_rally_points()
self.have_list = True
print('Loaded %u rally points from %s' % (self.rallyloader.rally_count(), args[1])) # depends on [control=['if'], data=[]]
elif args[0] == 'save':
if len(args) < 2:
print('Usage: rally save filename')
return # depends on [control=['if'], data=[]]
self.rallyloader.save(args[1])
print('Saved rally file %s' % args[1]) # depends on [control=['if'], data=[]]
elif args[0] == 'alt':
self.cmd_rally_alt(args[1:]) # depends on [control=['if'], data=[]]
elif args[0] == 'land':
if len(args) >= 2 and args[1] == 'abort':
self.abort_ack_received = False
self.abort_first_send_time = 0
self.abort_alt = self.settings.rally_breakalt
if len(args) >= 3:
self.abort_alt = int(args[2]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
self.master.mav.command_long_send(self.settings.target_system, self.settings.target_component, mavutil.mavlink.MAV_CMD_DO_RALLY_LAND, 0, 0, 0, 0, 0, 0, 0, 0) # depends on [control=['if'], data=[]]
else:
self.print_usage() |
def reset_creation_info(self):
    """Reset builder flags so a new creation-info section can be built."""
    # FIXME: this state does not make sense
    for flag in ("created_date_set", "creation_comment_set", "lics_list_ver_set"):
        setattr(self, flag, False)
constant[
Resets builder state to allow building new creation info.]
name[self].created_date_set assign[=] constant[False]
name[self].creation_comment_set assign[=] constant[False]
name[self].lics_list_ver_set assign[=] constant[False] | keyword[def] identifier[reset_creation_info] ( identifier[self] ):
literal[string]
identifier[self] . identifier[created_date_set] = keyword[False]
identifier[self] . identifier[creation_comment_set] = keyword[False]
identifier[self] . identifier[lics_list_ver_set] = keyword[False] | def reset_creation_info(self):
"""
Resets builder state to allow building new creation info."""
# FIXME: this state does not make sense
self.created_date_set = False
self.creation_comment_set = False
self.lics_list_ver_set = False |
def update(zone, name, ttl, rdtype, data, nameserver='127.0.0.1', timeout=5,
           replace=False, port=53, **kwargs):
    '''
    Add, replace, or update a DNS record.
    nameserver must be an IP address and the minion running this module
    must have update privileges on that server.

    If replace is true, first deletes all records for this name and type.

    CLI Example:

    .. code-block:: bash

        salt ns1 ddns.update example.com host1 60 A 10.0.0.1
    '''
    name = six.text_type(name)

    # Build the fully-qualified record name (append the zone unless the
    # caller already passed an absolute name ending in '.').
    if name[-1:] == '.':
        fqdn = name
    else:
        fqdn = '{0}.{1}'.format(name, zone)

    # Query the current records so we can skip a no-op update.
    request = dns.message.make_query(fqdn, rdtype)
    answer = dns.query.udp(request, nameserver, timeout, port)

    rdtype = dns.rdatatype.from_text(rdtype)
    rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)

    # TSIG signing material for the dynamic update.
    keyring = _get_keyring(_config('keyfile', **kwargs))
    keyname = _config('keyname', **kwargs)
    keyalgorithm = _config('keyalgorithm',
                           **kwargs) or 'HMAC-MD5.SIG-ALG.REG.INT'

    # The record already exists with identical data and TTL: nothing to do
    # unless the caller explicitly asked for a replacement.
    # (The original code also tested len(answer.answer) >= 1 and
    # len(rrset.items) >= 1 here, but both are always true inside this
    # loop, so the checks were dead and have been removed.)
    is_exist = False
    for rrset in answer.answer:
        if rdata in rrset.items and ttl == rrset.ttl:
            is_exist = True
            break

    dns_update = dns.update.Update(zone, keyring=keyring, keyname=keyname,
                                   keyalgorithm=keyalgorithm)
    if replace:
        dns_update.replace(name, ttl, rdata)
    elif not is_exist:
        dns_update.add(name, ttl, rdata)
    else:
        # Record already present and no replacement requested.
        return None
    answer = dns.query.udp(dns_update, nameserver, timeout, port)
    if answer.rcode() > 0:
        return False
    return True
constant[
Add, replace, or update a DNS record.
nameserver must be an IP address and the minion running this module
must have update privileges on that server.
If replace is true, first deletes all records for this name and type.
CLI Example:
.. code-block:: bash
salt ns1 ddns.update example.com host1 60 A 10.0.0.1
]
variable[name] assign[=] call[name[six].text_type, parameter[name[name]]]
if compare[call[name[name]][<ast.Slice object at 0x7da18f09f910>] equal[==] constant[.]] begin[:]
variable[fqdn] assign[=] name[name]
variable[request] assign[=] call[name[dns].message.make_query, parameter[name[fqdn], name[rdtype]]]
variable[answer] assign[=] call[name[dns].query.udp, parameter[name[request], name[nameserver], name[timeout], name[port]]]
variable[rdtype] assign[=] call[name[dns].rdatatype.from_text, parameter[name[rdtype]]]
variable[rdata] assign[=] call[name[dns].rdata.from_text, parameter[name[dns].rdataclass.IN, name[rdtype], name[data]]]
variable[keyring] assign[=] call[name[_get_keyring], parameter[call[name[_config], parameter[constant[keyfile]]]]]
variable[keyname] assign[=] call[name[_config], parameter[constant[keyname]]]
variable[keyalgorithm] assign[=] <ast.BoolOp object at 0x7da18f09ed10>
variable[is_exist] assign[=] constant[False]
for taget[name[rrset]] in starred[name[answer].answer] begin[:]
if compare[name[rdata] in name[rrset].items] begin[:]
if compare[name[ttl] equal[==] name[rrset].ttl] begin[:]
if <ast.BoolOp object at 0x7da18f09cfd0> begin[:]
variable[is_exist] assign[=] constant[True]
break
variable[dns_update] assign[=] call[name[dns].update.Update, parameter[name[zone]]]
if name[replace] begin[:]
call[name[dns_update].replace, parameter[name[name], name[ttl], name[rdata]]]
variable[answer] assign[=] call[name[dns].query.udp, parameter[name[dns_update], name[nameserver], name[timeout], name[port]]]
if compare[call[name[answer].rcode, parameter[]] greater[>] constant[0]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[update] ( identifier[zone] , identifier[name] , identifier[ttl] , identifier[rdtype] , identifier[data] , identifier[nameserver] = literal[string] , identifier[timeout] = literal[int] ,
identifier[replace] = keyword[False] , identifier[port] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[name] = identifier[six] . identifier[text_type] ( identifier[name] )
keyword[if] identifier[name] [- literal[int] :]== literal[string] :
identifier[fqdn] = identifier[name]
keyword[else] :
identifier[fqdn] = literal[string] . identifier[format] ( identifier[name] , identifier[zone] )
identifier[request] = identifier[dns] . identifier[message] . identifier[make_query] ( identifier[fqdn] , identifier[rdtype] )
identifier[answer] = identifier[dns] . identifier[query] . identifier[udp] ( identifier[request] , identifier[nameserver] , identifier[timeout] , identifier[port] )
identifier[rdtype] = identifier[dns] . identifier[rdatatype] . identifier[from_text] ( identifier[rdtype] )
identifier[rdata] = identifier[dns] . identifier[rdata] . identifier[from_text] ( identifier[dns] . identifier[rdataclass] . identifier[IN] , identifier[rdtype] , identifier[data] )
identifier[keyring] = identifier[_get_keyring] ( identifier[_config] ( literal[string] ,** identifier[kwargs] ))
identifier[keyname] = identifier[_config] ( literal[string] ,** identifier[kwargs] )
identifier[keyalgorithm] = identifier[_config] ( literal[string] ,
** identifier[kwargs] ) keyword[or] literal[string]
identifier[is_exist] = keyword[False]
keyword[for] identifier[rrset] keyword[in] identifier[answer] . identifier[answer] :
keyword[if] identifier[rdata] keyword[in] identifier[rrset] . identifier[items] :
keyword[if] identifier[ttl] == identifier[rrset] . identifier[ttl] :
keyword[if] identifier[len] ( identifier[answer] . identifier[answer] )>= literal[int] keyword[or] identifier[len] ( identifier[rrset] . identifier[items] )>= literal[int] :
identifier[is_exist] = keyword[True]
keyword[break]
identifier[dns_update] = identifier[dns] . identifier[update] . identifier[Update] ( identifier[zone] , identifier[keyring] = identifier[keyring] , identifier[keyname] = identifier[keyname] ,
identifier[keyalgorithm] = identifier[keyalgorithm] )
keyword[if] identifier[replace] :
identifier[dns_update] . identifier[replace] ( identifier[name] , identifier[ttl] , identifier[rdata] )
keyword[elif] keyword[not] identifier[is_exist] :
identifier[dns_update] . identifier[add] ( identifier[name] , identifier[ttl] , identifier[rdata] )
keyword[else] :
keyword[return] keyword[None]
identifier[answer] = identifier[dns] . identifier[query] . identifier[udp] ( identifier[dns_update] , identifier[nameserver] , identifier[timeout] , identifier[port] )
keyword[if] identifier[answer] . identifier[rcode] ()> literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def update(zone, name, ttl, rdtype, data, nameserver='127.0.0.1', timeout=5, replace=False, port=53, **kwargs):
"""
Add, replace, or update a DNS record.
nameserver must be an IP address and the minion running this module
must have update privileges on that server.
If replace is true, first deletes all records for this name and type.
CLI Example:
.. code-block:: bash
salt ns1 ddns.update example.com host1 60 A 10.0.0.1
"""
name = six.text_type(name)
if name[-1:] == '.':
fqdn = name # depends on [control=['if'], data=[]]
else:
fqdn = '{0}.{1}'.format(name, zone)
request = dns.message.make_query(fqdn, rdtype)
answer = dns.query.udp(request, nameserver, timeout, port)
rdtype = dns.rdatatype.from_text(rdtype)
rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)
keyring = _get_keyring(_config('keyfile', **kwargs))
keyname = _config('keyname', **kwargs)
keyalgorithm = _config('keyalgorithm', **kwargs) or 'HMAC-MD5.SIG-ALG.REG.INT'
is_exist = False
for rrset in answer.answer:
if rdata in rrset.items:
if ttl == rrset.ttl:
if len(answer.answer) >= 1 or len(rrset.items) >= 1:
is_exist = True
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rrset']]
dns_update = dns.update.Update(zone, keyring=keyring, keyname=keyname, keyalgorithm=keyalgorithm)
if replace:
dns_update.replace(name, ttl, rdata) # depends on [control=['if'], data=[]]
elif not is_exist:
dns_update.add(name, ttl, rdata) # depends on [control=['if'], data=[]]
else:
return None
answer = dns.query.udp(dns_update, nameserver, timeout, port)
if answer.rcode() > 0:
return False # depends on [control=['if'], data=[]]
return True |
def format_output(groups, flag):
    """Return one tab-separated summary line per auto scaling group.

    ``groups`` is the describe-auto-scaling-groups response dict; ``flag``
    is accepted for interface compatibility and not used here.
    """
    template = '{0}\t{1}\t{2}\t{3}\t{4}\t{5}'
    return [
        template.format(
            group['AutoScalingGroupName'],
            group['LaunchConfigurationName'],
            'desired:' + str(group['DesiredCapacity']),
            'max:' + str(group['MaxSize']),
            'min:' + str(group['MinSize']),
            group['CreatedTime'].strftime('%Y/%m/%d %H:%M:%S'),
        )
        for group in groups['AutoScalingGroups']
    ]
constant[return formatted string for instance]
variable[out] assign[=] list[[]]
variable[line_format] assign[=] constant[{0} {1} {2} {3} {4} {5}]
for taget[name[g]] in starred[call[name[groups]][constant[AutoScalingGroups]]] begin[:]
call[name[out].append, parameter[call[name[line_format].format, parameter[call[name[g]][constant[AutoScalingGroupName]], call[name[g]][constant[LaunchConfigurationName]], binary_operation[constant[desired:] + call[name[str], parameter[call[name[g]][constant[DesiredCapacity]]]]], binary_operation[constant[max:] + call[name[str], parameter[call[name[g]][constant[MaxSize]]]]], binary_operation[constant[min:] + call[name[str], parameter[call[name[g]][constant[MinSize]]]]], call[call[name[g]][constant[CreatedTime]].strftime, parameter[constant[%Y/%m/%d %H:%M:%S]]]]]]]
return[name[out]] | keyword[def] identifier[format_output] ( identifier[groups] , identifier[flag] ):
literal[string]
identifier[out] =[]
identifier[line_format] = literal[string]
keyword[for] identifier[g] keyword[in] identifier[groups] [ literal[string] ]:
identifier[out] . identifier[append] ( identifier[line_format] . identifier[format] (
identifier[g] [ literal[string] ],
identifier[g] [ literal[string] ],
literal[string] + identifier[str] ( identifier[g] [ literal[string] ]),
literal[string] + identifier[str] ( identifier[g] [ literal[string] ]),
literal[string] + identifier[str] ( identifier[g] [ literal[string] ]),
identifier[g] [ literal[string] ]. identifier[strftime] ( literal[string] ),
))
keyword[return] identifier[out] | def format_output(groups, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}\t{5}'
for g in groups['AutoScalingGroups']:
out.append(line_format.format(g['AutoScalingGroupName'], g['LaunchConfigurationName'], 'desired:' + str(g['DesiredCapacity']), 'max:' + str(g['MaxSize']), 'min:' + str(g['MinSize']), g['CreatedTime'].strftime('%Y/%m/%d %H:%M:%S'))) # depends on [control=['for'], data=['g']]
return out |
def from_export(cls, endpoint):
    # type: (ExportEndpoint) -> EndpointDescription
    """
    Converts an ExportEndpoint bean to an EndpointDescription

    :param endpoint: An ExportEndpoint bean
    :return: An EndpointDescription bean
    """
    assert isinstance(endpoint, ExportEndpoint)

    # Start from the exported service properties
    properties = endpoint.get_properties()

    # Keys expected on the import side
    properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
    properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations
    properties[
        pelix.remote.PROP_EXPORTED_INTERFACES
    ] = endpoint.specifications

    # Export-side keys must not leak into the description
    export_only_keys = (
        pelix.remote.PROP_EXPORTED_CONFIGS,
        pelix.remote.PROP_EXPORTED_INTERFACES,
        pelix.remote.PROP_EXPORTED_INTENTS,
        pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
    )
    for export_key in export_only_keys:
        # Equivalent to try/del-except-KeyError: remove if present
        properties.pop(export_key, None)

    # Endpoint identification
    properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
    properties[
        pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID
    ] = endpoint.framework

    return EndpointDescription(None, properties)
constant[
Converts an ExportEndpoint bean to an EndpointDescription
:param endpoint: An ExportEndpoint bean
:return: An EndpointDescription bean
]
assert[call[name[isinstance], parameter[name[endpoint], name[ExportEndpoint]]]]
variable[properties] assign[=] call[name[endpoint].get_properties, parameter[]]
call[name[properties]][name[pelix].remote.PROP_ENDPOINT_ID] assign[=] name[endpoint].uid
call[name[properties]][name[pelix].remote.PROP_IMPORTED_CONFIGS] assign[=] name[endpoint].configurations
call[name[properties]][name[pelix].remote.PROP_EXPORTED_INTERFACES] assign[=] name[endpoint].specifications
for taget[name[key]] in starred[tuple[[<ast.Attribute object at 0x7da18f721ea0>, <ast.Attribute object at 0x7da18f723250>, <ast.Attribute object at 0x7da18f7226b0>, <ast.Attribute object at 0x7da18f721450>]]] begin[:]
<ast.Try object at 0x7da18f721660>
call[name[properties]][name[pelix].remote.PROP_ENDPOINT_NAME] assign[=] name[endpoint].name
call[name[properties]][name[pelix].remote.PROP_ENDPOINT_FRAMEWORK_UUID] assign[=] name[endpoint].framework
return[call[name[EndpointDescription], parameter[constant[None], name[properties]]]] | keyword[def] identifier[from_export] ( identifier[cls] , identifier[endpoint] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[endpoint] , identifier[ExportEndpoint] )
identifier[properties] = identifier[endpoint] . identifier[get_properties] ()
identifier[properties] [ identifier[pelix] . identifier[remote] . identifier[PROP_ENDPOINT_ID] ]= identifier[endpoint] . identifier[uid]
identifier[properties] [ identifier[pelix] . identifier[remote] . identifier[PROP_IMPORTED_CONFIGS] ]= identifier[endpoint] . identifier[configurations]
identifier[properties] [
identifier[pelix] . identifier[remote] . identifier[PROP_EXPORTED_INTERFACES]
]= identifier[endpoint] . identifier[specifications]
keyword[for] identifier[key] keyword[in] (
identifier[pelix] . identifier[remote] . identifier[PROP_EXPORTED_CONFIGS] ,
identifier[pelix] . identifier[remote] . identifier[PROP_EXPORTED_INTERFACES] ,
identifier[pelix] . identifier[remote] . identifier[PROP_EXPORTED_INTENTS] ,
identifier[pelix] . identifier[remote] . identifier[PROP_EXPORTED_INTENTS_EXTRA] ,
):
keyword[try] :
keyword[del] identifier[properties] [ identifier[key] ]
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[properties] [ identifier[pelix] . identifier[remote] . identifier[PROP_ENDPOINT_NAME] ]= identifier[endpoint] . identifier[name]
identifier[properties] [
identifier[pelix] . identifier[remote] . identifier[PROP_ENDPOINT_FRAMEWORK_UUID]
]= identifier[endpoint] . identifier[framework]
keyword[return] identifier[EndpointDescription] ( keyword[None] , identifier[properties] ) | def from_export(cls, endpoint):
# type: (ExportEndpoint) -> EndpointDescription
'\n Converts an ExportEndpoint bean to an EndpointDescription\n\n :param endpoint: An ExportEndpoint bean\n :return: An EndpointDescription bean\n '
assert isinstance(endpoint, ExportEndpoint)
# Service properties
properties = endpoint.get_properties()
# Set import keys
properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations
properties[pelix.remote.PROP_EXPORTED_INTERFACES] = endpoint.specifications
# Remove export keys
for key in (pelix.remote.PROP_EXPORTED_CONFIGS, pelix.remote.PROP_EXPORTED_INTERFACES, pelix.remote.PROP_EXPORTED_INTENTS, pelix.remote.PROP_EXPORTED_INTENTS_EXTRA):
try:
del properties[key] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['key']]
# Other information
properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
properties[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID] = endpoint.framework
return EndpointDescription(None, properties) |
def apply_transformations(collection, transformations, select=None):
    ''' Apply all transformations to the variables in the collection.

    Args:
        transformations (list): List of transformations to apply.
        select (list): Optional list of names of variables to retain after all
            transformations are applied.
    '''
    for spec in transformations:
        kwargs = dict(spec)
        target = kwargs.pop('name')
        cols = kwargs.pop('input', None)

        if isinstance(target, string_types):
            # 'and'/'or' clash with Python keywords; the transform module
            # exposes them with a trailing underscore.
            if target in ('and', 'or'):
                target += '_'
            if not hasattr(transform, target):
                raise ValueError("No transformation '%s' found!" % target)
            func = getattr(transform, target)
        else:
            # Already a callable transformation
            func = target
        func(collection, cols, **kwargs)

    if select is not None:
        transform.Select(collection, select)

    return collection
constant[ Apply all transformations to the variables in the collection.
Args:
transformations (list): List of transformations to apply.
select (list): Optional list of names of variables to retain after all
transformations are applied.
]
for taget[name[t]] in starred[name[transformations]] begin[:]
variable[kwargs] assign[=] call[name[dict], parameter[name[t]]]
variable[func] assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[cols] assign[=] call[name[kwargs].pop, parameter[constant[input], constant[None]]]
if call[name[isinstance], parameter[name[func], name[string_types]]] begin[:]
if compare[name[func] in tuple[[<ast.Constant object at 0x7da1b12b6050>, <ast.Constant object at 0x7da1b12b5e70>]]] begin[:]
<ast.AugAssign object at 0x7da1b12b6200>
if <ast.UnaryOp object at 0x7da1b12b40a0> begin[:]
<ast.Raise object at 0x7da1b12b65f0>
variable[func] assign[=] call[name[getattr], parameter[name[transform], name[func]]]
call[name[func], parameter[name[collection], name[cols]]]
if compare[name[select] is_not constant[None]] begin[:]
call[name[transform].Select, parameter[name[collection], name[select]]]
return[name[collection]] | keyword[def] identifier[apply_transformations] ( identifier[collection] , identifier[transformations] , identifier[select] = keyword[None] ):
literal[string]
keyword[for] identifier[t] keyword[in] identifier[transformations] :
identifier[kwargs] = identifier[dict] ( identifier[t] )
identifier[func] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[cols] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[isinstance] ( identifier[func] , identifier[string_types] ):
keyword[if] identifier[func] keyword[in] ( literal[string] , literal[string] ):
identifier[func] += literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[transform] , identifier[func] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[func] )
identifier[func] = identifier[getattr] ( identifier[transform] , identifier[func] )
identifier[func] ( identifier[collection] , identifier[cols] ,** identifier[kwargs] )
keyword[if] identifier[select] keyword[is] keyword[not] keyword[None] :
identifier[transform] . identifier[Select] ( identifier[collection] , identifier[select] )
keyword[return] identifier[collection] | def apply_transformations(collection, transformations, select=None):
""" Apply all transformations to the variables in the collection.
Args:
transformations (list): List of transformations to apply.
select (list): Optional list of names of variables to retain after all
transformations are applied.
"""
for t in transformations:
kwargs = dict(t)
func = kwargs.pop('name')
cols = kwargs.pop('input', None)
if isinstance(func, string_types):
if func in ('and', 'or'):
func += '_' # depends on [control=['if'], data=['func']]
if not hasattr(transform, func):
raise ValueError("No transformation '%s' found!" % func) # depends on [control=['if'], data=[]]
func = getattr(transform, func)
func(collection, cols, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']]
if select is not None:
transform.Select(collection, select) # depends on [control=['if'], data=['select']]
return collection |
def handler_for_name(fq_name):
  """Resolves and instantiates handler by fully qualified name.

  First resolves the name using for_name call. Then if it resolves to a class,
  instantiates a class, if it resolves to a method - instantiates the class and
  binds method to the instance.

  Args:
    fq_name: fully qualified name of something to find.

  Returns:
    handler instance which is ready to be called.
  """
  # NOTE(review): this function relies on Python-2-only APIs:
  # ``types.ClassType`` (old-style classes) and unbound-method attributes
  # (``im_class``) do not exist on Python 3 — confirm before porting.
  resolved_name = for_name(fq_name)
  if isinstance(resolved_name, (type, types.ClassType)):
    # create new instance if this is type (covers both new-style and
    # old-style classes on Python 2)
    return resolved_name()
  elif isinstance(resolved_name, types.MethodType):
    # Unbound method: instantiate its class (assumes a no-arg constructor —
    # TODO confirm) and look the method up on the instance to bind it.
    return getattr(resolved_name.im_class(), resolved_name.__name__)
  else:
    # Anything else (plain function, pre-built instance) is returned as-is.
    return resolved_name
constant[Resolves and instantiates handler by fully qualified name.
First resolves the name using for_name call. Then if it resolves to a class,
instantiates a class, if it resolves to a method - instantiates the class and
binds method to the instance.
Args:
fq_name: fully qualified name of something to find.
Returns:
handler instance which is ready to be called.
]
variable[resolved_name] assign[=] call[name[for_name], parameter[name[fq_name]]]
if call[name[isinstance], parameter[name[resolved_name], tuple[[<ast.Name object at 0x7da18eb55060>, <ast.Attribute object at 0x7da18eb557e0>]]]] begin[:]
return[call[name[resolved_name], parameter[]]] | keyword[def] identifier[handler_for_name] ( identifier[fq_name] ):
literal[string]
identifier[resolved_name] = identifier[for_name] ( identifier[fq_name] )
keyword[if] identifier[isinstance] ( identifier[resolved_name] ,( identifier[type] , identifier[types] . identifier[ClassType] )):
keyword[return] identifier[resolved_name] ()
keyword[elif] identifier[isinstance] ( identifier[resolved_name] , identifier[types] . identifier[MethodType] ):
keyword[return] identifier[getattr] ( identifier[resolved_name] . identifier[im_class] (), identifier[resolved_name] . identifier[__name__] )
keyword[else] :
keyword[return] identifier[resolved_name] | def handler_for_name(fq_name):
"""Resolves and instantiates handler by fully qualified name.
First resolves the name using for_name call. Then if it resolves to a class,
instantiates a class, if it resolves to a method - instantiates the class and
binds method to the instance.
Args:
fq_name: fully qualified name of something to find.
Returns:
handler instance which is ready to be called.
"""
resolved_name = for_name(fq_name)
if isinstance(resolved_name, (type, types.ClassType)):
# create new instance if this is type
return resolved_name() # depends on [control=['if'], data=[]]
elif isinstance(resolved_name, types.MethodType):
# bind the method
return getattr(resolved_name.im_class(), resolved_name.__name__) # depends on [control=['if'], data=[]]
else:
return resolved_name |
def get_module_environment(env=None, function=None):
    '''
    Get module optional environment.

    To setup an environment option for a particular module,
    add either pillar or config at the minion as follows:

    system-environment:
      modules:
        pkg:
          _:
            LC_ALL: en_GB.UTF-8
            FOO: bar
          install:
            HELLO: world
      states:
        pkg:
          _:
            LC_ALL: en_US.Latin-1
            NAME: Fred

    So this will export the environment to all the modules,
    states, returnes etc. And calling this function with the globals()
    in that context will fetch the environment for further reuse.

    Underscore '_' exports environment for all functions within the module.
    If you want to specifially export environment only for one function,
    specify it as in the example above "install".

    First will be fetched configuration, where virtual name goes first,
    then the physical name of the module overrides the virtual settings.
    Then pillar settings will override the configuration in the same order.

    :param env: the module's globals() (or any mapping providing
        __file__, __virtualname__, __opts__ and __pillar__)
    :param function: name of a particular function
    :return: dict
    '''
    result = {}
    if not env:
        env = {}

    # These values depend only on the calling module, not on the data
    # source, so compute them once instead of once per source below.
    fname = env.get('__file__', '')
    physical_name = os.path.basename(fname).split('.')[0]
    section = os.path.basename(os.path.dirname(fname))
    # Virtual name first, physical name second: later updates override
    # earlier ones, so physical-name settings win over virtual-name ones.
    m_names = [env.get('__virtualname__')]
    if physical_name not in m_names:
        m_names.append(physical_name)

    # Configuration (__opts__) first, pillar second: pillar wins.
    for env_src in [env.get('__opts__', {}), env.get('__pillar__', {})]:
        for m_name in m_names:
            if not m_name:
                # No __virtualname__ (or empty physical name): skip
                continue
            # '_' applies to every function of the module...
            result.update(env_src.get('system-environment', {}).get(
                section, {}).get(m_name, {}).get('_', {}).copy())
            if function is not None:
                # ...while a function-specific section overrides it.
                result.update(env_src.get('system-environment', {}).get(
                    section, {}).get(m_name, {}).get(function, {}).copy())

    return result
constant[
Get module optional environment.
To setup an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returnes etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifially export environment only for one function,
specify it as in the example above "install".
First will be fetched configuration, where virtual name goes first,
then the physical name of the module overrides the virtual settings.
Then pillar settings will override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
]
variable[result] assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da18f58f790> begin[:]
variable[env] assign[=] dictionary[[], []]
for taget[name[env_src]] in starred[list[[<ast.Call object at 0x7da18f58e9b0>, <ast.Call object at 0x7da18f58d5d0>]]] begin[:]
variable[fname] assign[=] call[name[env].get, parameter[constant[__file__], constant[]]]
variable[physical_name] assign[=] call[call[call[name[os].path.basename, parameter[name[fname]]].split, parameter[constant[.]]]][constant[0]]
variable[section] assign[=] call[name[os].path.basename, parameter[call[name[os].path.dirname, parameter[name[fname]]]]]
variable[m_names] assign[=] list[[<ast.Call object at 0x7da18f58e6e0>]]
if compare[name[physical_name] <ast.NotIn object at 0x7da2590d7190> name[m_names]] begin[:]
call[name[m_names].append, parameter[name[physical_name]]]
for taget[name[m_name]] in starred[name[m_names]] begin[:]
if <ast.UnaryOp object at 0x7da18f58dea0> begin[:]
continue
call[name[result].update, parameter[call[call[call[call[call[name[env_src].get, parameter[constant[system-environment], dictionary[[], []]]].get, parameter[name[section], dictionary[[], []]]].get, parameter[name[m_name], dictionary[[], []]]].get, parameter[constant[_], dictionary[[], []]]].copy, parameter[]]]]
if compare[name[function] is_not constant[None]] begin[:]
call[name[result].update, parameter[call[call[call[call[call[name[env_src].get, parameter[constant[system-environment], dictionary[[], []]]].get, parameter[name[section], dictionary[[], []]]].get, parameter[name[m_name], dictionary[[], []]]].get, parameter[name[function], dictionary[[], []]]].copy, parameter[]]]]
return[name[result]] | keyword[def] identifier[get_module_environment] ( identifier[env] = keyword[None] , identifier[function] = keyword[None] ):
literal[string]
identifier[result] ={}
keyword[if] keyword[not] identifier[env] :
identifier[env] ={}
keyword[for] identifier[env_src] keyword[in] [ identifier[env] . identifier[get] ( literal[string] ,{}), identifier[env] . identifier[get] ( literal[string] ,{})]:
identifier[fname] = identifier[env] . identifier[get] ( literal[string] , literal[string] )
identifier[physical_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[fname] ). identifier[split] ( literal[string] )[ literal[int] ]
identifier[section] = identifier[os] . identifier[path] . identifier[basename] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[fname] ))
identifier[m_names] =[ identifier[env] . identifier[get] ( literal[string] )]
keyword[if] identifier[physical_name] keyword[not] keyword[in] identifier[m_names] :
identifier[m_names] . identifier[append] ( identifier[physical_name] )
keyword[for] identifier[m_name] keyword[in] identifier[m_names] :
keyword[if] keyword[not] identifier[m_name] :
keyword[continue]
identifier[result] . identifier[update] ( identifier[env_src] . identifier[get] ( literal[string] ,{}). identifier[get] (
identifier[section] ,{}). identifier[get] ( identifier[m_name] ,{}). identifier[get] ( literal[string] ,{}). identifier[copy] ())
keyword[if] identifier[function] keyword[is] keyword[not] keyword[None] :
identifier[result] . identifier[update] ( identifier[env_src] . identifier[get] ( literal[string] ,{}). identifier[get] (
identifier[section] ,{}). identifier[get] ( identifier[m_name] ,{}). identifier[get] ( identifier[function] ,{}). identifier[copy] ())
keyword[return] identifier[result] | def get_module_environment(env=None, function=None):
"""
Get module optional environment.
To setup an environment option for a particular module,
add either pillar or config at the minion as follows:
system-environment:
modules:
pkg:
_:
LC_ALL: en_GB.UTF-8
FOO: bar
install:
HELLO: world
states:
pkg:
_:
LC_ALL: en_US.Latin-1
NAME: Fred
So this will export the environment to all the modules,
states, returnes etc. And calling this function with the globals()
in that context will fetch the environment for further reuse.
Underscore '_' exports environment for all functions within the module.
If you want to specifially export environment only for one function,
specify it as in the example above "install".
First will be fetched configuration, where virtual name goes first,
then the physical name of the module overrides the virtual settings.
Then pillar settings will override the configuration in the same order.
:param env:
:param function: name of a particular function
:return: dict
"""
result = {}
if not env:
env = {} # depends on [control=['if'], data=[]]
for env_src in [env.get('__opts__', {}), env.get('__pillar__', {})]:
fname = env.get('__file__', '')
physical_name = os.path.basename(fname).split('.')[0]
section = os.path.basename(os.path.dirname(fname))
m_names = [env.get('__virtualname__')]
if physical_name not in m_names:
m_names.append(physical_name) # depends on [control=['if'], data=['physical_name', 'm_names']]
for m_name in m_names:
if not m_name:
continue # depends on [control=['if'], data=[]]
result.update(env_src.get('system-environment', {}).get(section, {}).get(m_name, {}).get('_', {}).copy())
if function is not None:
result.update(env_src.get('system-environment', {}).get(section, {}).get(m_name, {}).get(function, {}).copy()) # depends on [control=['if'], data=['function']] # depends on [control=['for'], data=['m_name']] # depends on [control=['for'], data=['env_src']]
return result |
def find_city(self, city, state=None, best_match=True, min_similarity=70):
"""
Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will return. otherwise, will return all matching cities.
**中文文档**
如果给定了state, 则只在指定的state里的城市中寻找, 否则, 在全国所有的城市中寻找。
"""
# find out what is the city that user looking for
if state:
state_sort = self.find_state(state, best_match=True)[0]
city_pool = self.state_to_city_mapper[state_sort.upper()]
else:
city_pool = self.city_list
result_city_list = list()
if best_match:
city, confidence = extractOne(city, city_pool)
if confidence >= min_similarity:
result_city_list.append(city)
else:
for city, confidence in extract(city, city_pool):
if confidence >= min_similarity:
result_city_list.append(city)
if len(result_city_list) == 0:
raise ValueError("'%s' is not a valid city name" % city)
return result_city_list | def function[find_city, parameter[self, city, state, best_match, min_similarity]]:
constant[
Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will return. otherwise, will return all matching cities.
**中文文档**
如果给定了state, 则只在指定的state里的城市中寻找, 否则, 在全国所有的城市中寻找。
]
if name[state] begin[:]
variable[state_sort] assign[=] call[call[name[self].find_state, parameter[name[state]]]][constant[0]]
variable[city_pool] assign[=] call[name[self].state_to_city_mapper][call[name[state_sort].upper, parameter[]]]
variable[result_city_list] assign[=] call[name[list], parameter[]]
if name[best_match] begin[:]
<ast.Tuple object at 0x7da20e9b0df0> assign[=] call[name[extractOne], parameter[name[city], name[city_pool]]]
if compare[name[confidence] greater_or_equal[>=] name[min_similarity]] begin[:]
call[name[result_city_list].append, parameter[name[city]]]
if compare[call[name[len], parameter[name[result_city_list]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20e9b15d0>
return[name[result_city_list]] | keyword[def] identifier[find_city] ( identifier[self] , identifier[city] , identifier[state] = keyword[None] , identifier[best_match] = keyword[True] , identifier[min_similarity] = literal[int] ):
literal[string]
keyword[if] identifier[state] :
identifier[state_sort] = identifier[self] . identifier[find_state] ( identifier[state] , identifier[best_match] = keyword[True] )[ literal[int] ]
identifier[city_pool] = identifier[self] . identifier[state_to_city_mapper] [ identifier[state_sort] . identifier[upper] ()]
keyword[else] :
identifier[city_pool] = identifier[self] . identifier[city_list]
identifier[result_city_list] = identifier[list] ()
keyword[if] identifier[best_match] :
identifier[city] , identifier[confidence] = identifier[extractOne] ( identifier[city] , identifier[city_pool] )
keyword[if] identifier[confidence] >= identifier[min_similarity] :
identifier[result_city_list] . identifier[append] ( identifier[city] )
keyword[else] :
keyword[for] identifier[city] , identifier[confidence] keyword[in] identifier[extract] ( identifier[city] , identifier[city_pool] ):
keyword[if] identifier[confidence] >= identifier[min_similarity] :
identifier[result_city_list] . identifier[append] ( identifier[city] )
keyword[if] identifier[len] ( identifier[result_city_list] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[city] )
keyword[return] identifier[result_city_list] | def find_city(self, city, state=None, best_match=True, min_similarity=70):
"""
Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will return. otherwise, will return all matching cities.
**中文文档**
如果给定了state, 则只在指定的state里的城市中寻找, 否则, 在全国所有的城市中寻找。
"""
# find out what is the city that user looking for
if state:
state_sort = self.find_state(state, best_match=True)[0]
city_pool = self.state_to_city_mapper[state_sort.upper()] # depends on [control=['if'], data=[]]
else:
city_pool = self.city_list
result_city_list = list()
if best_match:
(city, confidence) = extractOne(city, city_pool)
if confidence >= min_similarity:
result_city_list.append(city) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
for (city, confidence) in extract(city, city_pool):
if confidence >= min_similarity:
result_city_list.append(city) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(result_city_list) == 0:
raise ValueError("'%s' is not a valid city name" % city) # depends on [control=['if'], data=[]]
return result_city_list |
def get(self, hostport):
"""Get a Peer for the given destination.
A new Peer is added to the peer heap and returned if one does
not already exist for the given host-port. Otherwise, the
existing Peer is returned.
"""
assert hostport, "hostport is required"
assert isinstance(hostport, basestring), "hostport must be a string"
if hostport not in self._peers:
self._add(hostport)
return self._peers[hostport] | def function[get, parameter[self, hostport]]:
constant[Get a Peer for the given destination.
A new Peer is added to the peer heap and returned if one does
not already exist for the given host-port. Otherwise, the
existing Peer is returned.
]
assert[name[hostport]]
assert[call[name[isinstance], parameter[name[hostport], name[basestring]]]]
if compare[name[hostport] <ast.NotIn object at 0x7da2590d7190> name[self]._peers] begin[:]
call[name[self]._add, parameter[name[hostport]]]
return[call[name[self]._peers][name[hostport]]] | keyword[def] identifier[get] ( identifier[self] , identifier[hostport] ):
literal[string]
keyword[assert] identifier[hostport] , literal[string]
keyword[assert] identifier[isinstance] ( identifier[hostport] , identifier[basestring] ), literal[string]
keyword[if] identifier[hostport] keyword[not] keyword[in] identifier[self] . identifier[_peers] :
identifier[self] . identifier[_add] ( identifier[hostport] )
keyword[return] identifier[self] . identifier[_peers] [ identifier[hostport] ] | def get(self, hostport):
"""Get a Peer for the given destination.
A new Peer is added to the peer heap and returned if one does
not already exist for the given host-port. Otherwise, the
existing Peer is returned.
"""
assert hostport, 'hostport is required'
assert isinstance(hostport, basestring), 'hostport must be a string'
if hostport not in self._peers:
self._add(hostport) # depends on [control=['if'], data=['hostport']]
return self._peers[hostport] |
def _make_clusters(self, matrix, num_clusters_per_roi, metric):
"""clusters a given matrix by into specified number of clusters according to given metric"""
from scipy.cluster.hierarchy import fclusterdata
# maxclust needed to ensure t is interpreted as # clusters in heirarchical clustering
group_ids = fclusterdata(matrix, metric=metric, t=num_clusters_per_roi,
criterion='maxclust')
group_set = np.unique(group_ids)
clusters = [
self._summary_func(matrix[group_ids == group, :], axis=0, keepdims=True)
for group in group_set]
return np.vstack(clusters).squeeze() | def function[_make_clusters, parameter[self, matrix, num_clusters_per_roi, metric]]:
constant[clusters a given matrix by into specified number of clusters according to given metric]
from relative_module[scipy.cluster.hierarchy] import module[fclusterdata]
variable[group_ids] assign[=] call[name[fclusterdata], parameter[name[matrix]]]
variable[group_set] assign[=] call[name[np].unique, parameter[name[group_ids]]]
variable[clusters] assign[=] <ast.ListComp object at 0x7da18bcc8070>
return[call[call[name[np].vstack, parameter[name[clusters]]].squeeze, parameter[]]] | keyword[def] identifier[_make_clusters] ( identifier[self] , identifier[matrix] , identifier[num_clusters_per_roi] , identifier[metric] ):
literal[string]
keyword[from] identifier[scipy] . identifier[cluster] . identifier[hierarchy] keyword[import] identifier[fclusterdata]
identifier[group_ids] = identifier[fclusterdata] ( identifier[matrix] , identifier[metric] = identifier[metric] , identifier[t] = identifier[num_clusters_per_roi] ,
identifier[criterion] = literal[string] )
identifier[group_set] = identifier[np] . identifier[unique] ( identifier[group_ids] )
identifier[clusters] =[
identifier[self] . identifier[_summary_func] ( identifier[matrix] [ identifier[group_ids] == identifier[group] ,:], identifier[axis] = literal[int] , identifier[keepdims] = keyword[True] )
keyword[for] identifier[group] keyword[in] identifier[group_set] ]
keyword[return] identifier[np] . identifier[vstack] ( identifier[clusters] ). identifier[squeeze] () | def _make_clusters(self, matrix, num_clusters_per_roi, metric):
"""clusters a given matrix by into specified number of clusters according to given metric"""
from scipy.cluster.hierarchy import fclusterdata
# maxclust needed to ensure t is interpreted as # clusters in heirarchical clustering
group_ids = fclusterdata(matrix, metric=metric, t=num_clusters_per_roi, criterion='maxclust')
group_set = np.unique(group_ids)
clusters = [self._summary_func(matrix[group_ids == group, :], axis=0, keepdims=True) for group in group_set]
return np.vstack(clusters).squeeze() |
def _next_token(sql):
""" This is a basic tokenizer for our limited purposes.
It splits a SQL statement up into a series of segments, where a segment is one of:
- identifiers
- left or right parentheses
- multi-line comments
- single line comments
- white-space sequences
- string literals
- consecutive strings of characters that are not one of the items above
The aim is for us to be able to find function calls (identifiers followed by '('), and the
associated closing ')') so we can augment these if needed.
Args:
sql: a SQL statement as a (possibly multi-line) string.
Returns:
For each call, the next token in the initial input.
"""
i = 0
# We use def statements here to make the logic more clear. The start_* functions return
# true if i is the index of the start of that construct, while the end_* functions
# return true if i point to the first character beyond that construct or the end of the
# content.
#
# We don't currently need numbers so the tokenizer here just does sequences of
# digits as a convenience to shrink the total number of tokens. If we needed numbers
# later we would need a special handler for these much like strings.
def start_multi_line_comment(s, i):
return s[i] == '/' and i < len(s) - 1 and s[i + 1] == '*'
def end_multi_line_comment(s, i):
return s[i - 2] == '*' and s[i - 1] == '/'
def start_single_line_comment(s, i):
return s[i] == '-' and i < len(s) - 1 and s[i + 1] == '-'
def end_single_line_comment(s, i):
return s[i - 1] == '\n'
def start_whitespace(s, i):
return s[i].isspace()
def end_whitespace(s, i):
return not s[i].isspace()
def start_number(s, i):
return s[i].isdigit()
def end_number(s, i):
return not s[i].isdigit()
def start_identifier(s, i):
return s[i].isalpha() or s[i] == '_' or s[i] == '$'
def end_identifier(s, i):
return not(s[i].isalnum() or s[i] == '_')
def start_string(s, i):
return s[i] == '"' or s[i] == "'"
def always_true(s, i):
return True
while i < len(sql):
start = i
if start_multi_line_comment(sql, i):
i += 1
end_checker = end_multi_line_comment
elif start_single_line_comment(sql, i):
i += 1
end_checker = end_single_line_comment
elif start_whitespace(sql, i):
end_checker = end_whitespace
elif start_identifier(sql, i):
end_checker = end_identifier
elif start_number(sql, i):
end_checker = end_number
elif start_string(sql, i):
# Special handling here as we need to check for escaped closing quotes.
quote = sql[i]
end_checker = always_true
i += 1
while i < len(sql) and sql[i] != quote:
i += 2 if sql[i] == '\\' else 1
else:
# We return single characters for everything else
end_checker = always_true
i += 1
while i < len(sql) and not end_checker(sql, i):
i += 1
(yield sql[start:i]) | def function[_next_token, parameter[sql]]:
constant[ This is a basic tokenizer for our limited purposes.
It splits a SQL statement up into a series of segments, where a segment is one of:
- identifiers
- left or right parentheses
- multi-line comments
- single line comments
- white-space sequences
- string literals
- consecutive strings of characters that are not one of the items above
The aim is for us to be able to find function calls (identifiers followed by '('), and the
associated closing ')') so we can augment these if needed.
Args:
sql: a SQL statement as a (possibly multi-line) string.
Returns:
For each call, the next token in the initial input.
]
variable[i] assign[=] constant[0]
def function[start_multi_line_comment, parameter[s, i]]:
return[<ast.BoolOp object at 0x7da2044c1300>]
def function[end_multi_line_comment, parameter[s, i]]:
return[<ast.BoolOp object at 0x7da2044c1090>]
def function[start_single_line_comment, parameter[s, i]]:
return[<ast.BoolOp object at 0x7da2044c1de0>]
def function[end_single_line_comment, parameter[s, i]]:
return[compare[call[name[s]][binary_operation[name[i] - constant[1]]] equal[==] constant[
]]]
def function[start_whitespace, parameter[s, i]]:
return[call[call[name[s]][name[i]].isspace, parameter[]]]
def function[end_whitespace, parameter[s, i]]:
return[<ast.UnaryOp object at 0x7da2044c3dc0>]
def function[start_number, parameter[s, i]]:
return[call[call[name[s]][name[i]].isdigit, parameter[]]]
def function[end_number, parameter[s, i]]:
return[<ast.UnaryOp object at 0x7da1b2347be0>]
def function[start_identifier, parameter[s, i]]:
return[<ast.BoolOp object at 0x7da1b2344b50>]
def function[end_identifier, parameter[s, i]]:
return[<ast.UnaryOp object at 0x7da1b2344a90>]
def function[start_string, parameter[s, i]]:
return[<ast.BoolOp object at 0x7da1b2344e50>]
def function[always_true, parameter[s, i]]:
return[constant[True]]
while compare[name[i] less[<] call[name[len], parameter[name[sql]]]] begin[:]
variable[start] assign[=] name[i]
if call[name[start_multi_line_comment], parameter[name[sql], name[i]]] begin[:]
<ast.AugAssign object at 0x7da1b23479a0>
variable[end_checker] assign[=] name[end_multi_line_comment]
<ast.AugAssign object at 0x7da2044c2fe0>
while <ast.BoolOp object at 0x7da2044c3430> begin[:]
<ast.AugAssign object at 0x7da2044c2f50>
<ast.Yield object at 0x7da2044c00d0> | keyword[def] identifier[_next_token] ( identifier[sql] ):
literal[string]
identifier[i] = literal[int]
keyword[def] identifier[start_multi_line_comment] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] ]== literal[string] keyword[and] identifier[i] < identifier[len] ( identifier[s] )- literal[int] keyword[and] identifier[s] [ identifier[i] + literal[int] ]== literal[string]
keyword[def] identifier[end_multi_line_comment] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] - literal[int] ]== literal[string] keyword[and] identifier[s] [ identifier[i] - literal[int] ]== literal[string]
keyword[def] identifier[start_single_line_comment] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] ]== literal[string] keyword[and] identifier[i] < identifier[len] ( identifier[s] )- literal[int] keyword[and] identifier[s] [ identifier[i] + literal[int] ]== literal[string]
keyword[def] identifier[end_single_line_comment] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] - literal[int] ]== literal[string]
keyword[def] identifier[start_whitespace] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] ]. identifier[isspace] ()
keyword[def] identifier[end_whitespace] ( identifier[s] , identifier[i] ):
keyword[return] keyword[not] identifier[s] [ identifier[i] ]. identifier[isspace] ()
keyword[def] identifier[start_number] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] ]. identifier[isdigit] ()
keyword[def] identifier[end_number] ( identifier[s] , identifier[i] ):
keyword[return] keyword[not] identifier[s] [ identifier[i] ]. identifier[isdigit] ()
keyword[def] identifier[start_identifier] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] ]. identifier[isalpha] () keyword[or] identifier[s] [ identifier[i] ]== literal[string] keyword[or] identifier[s] [ identifier[i] ]== literal[string]
keyword[def] identifier[end_identifier] ( identifier[s] , identifier[i] ):
keyword[return] keyword[not] ( identifier[s] [ identifier[i] ]. identifier[isalnum] () keyword[or] identifier[s] [ identifier[i] ]== literal[string] )
keyword[def] identifier[start_string] ( identifier[s] , identifier[i] ):
keyword[return] identifier[s] [ identifier[i] ]== literal[string] keyword[or] identifier[s] [ identifier[i] ]== literal[string]
keyword[def] identifier[always_true] ( identifier[s] , identifier[i] ):
keyword[return] keyword[True]
keyword[while] identifier[i] < identifier[len] ( identifier[sql] ):
identifier[start] = identifier[i]
keyword[if] identifier[start_multi_line_comment] ( identifier[sql] , identifier[i] ):
identifier[i] += literal[int]
identifier[end_checker] = identifier[end_multi_line_comment]
keyword[elif] identifier[start_single_line_comment] ( identifier[sql] , identifier[i] ):
identifier[i] += literal[int]
identifier[end_checker] = identifier[end_single_line_comment]
keyword[elif] identifier[start_whitespace] ( identifier[sql] , identifier[i] ):
identifier[end_checker] = identifier[end_whitespace]
keyword[elif] identifier[start_identifier] ( identifier[sql] , identifier[i] ):
identifier[end_checker] = identifier[end_identifier]
keyword[elif] identifier[start_number] ( identifier[sql] , identifier[i] ):
identifier[end_checker] = identifier[end_number]
keyword[elif] identifier[start_string] ( identifier[sql] , identifier[i] ):
identifier[quote] = identifier[sql] [ identifier[i] ]
identifier[end_checker] = identifier[always_true]
identifier[i] += literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[sql] ) keyword[and] identifier[sql] [ identifier[i] ]!= identifier[quote] :
identifier[i] += literal[int] keyword[if] identifier[sql] [ identifier[i] ]== literal[string] keyword[else] literal[int]
keyword[else] :
identifier[end_checker] = identifier[always_true]
identifier[i] += literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[sql] ) keyword[and] keyword[not] identifier[end_checker] ( identifier[sql] , identifier[i] ):
identifier[i] += literal[int]
( keyword[yield] identifier[sql] [ identifier[start] : identifier[i] ]) | def _next_token(sql):
""" This is a basic tokenizer for our limited purposes.
It splits a SQL statement up into a series of segments, where a segment is one of:
- identifiers
- left or right parentheses
- multi-line comments
- single line comments
- white-space sequences
- string literals
- consecutive strings of characters that are not one of the items above
The aim is for us to be able to find function calls (identifiers followed by '('), and the
associated closing ')') so we can augment these if needed.
Args:
sql: a SQL statement as a (possibly multi-line) string.
Returns:
For each call, the next token in the initial input.
"""
i = 0
# We use def statements here to make the logic more clear. The start_* functions return
# true if i is the index of the start of that construct, while the end_* functions
# return true if i point to the first character beyond that construct or the end of the
# content.
#
# We don't currently need numbers so the tokenizer here just does sequences of
# digits as a convenience to shrink the total number of tokens. If we needed numbers
# later we would need a special handler for these much like strings.
def start_multi_line_comment(s, i):
return s[i] == '/' and i < len(s) - 1 and (s[i + 1] == '*')
def end_multi_line_comment(s, i):
return s[i - 2] == '*' and s[i - 1] == '/'
def start_single_line_comment(s, i):
return s[i] == '-' and i < len(s) - 1 and (s[i + 1] == '-')
def end_single_line_comment(s, i):
return s[i - 1] == '\n'
def start_whitespace(s, i):
return s[i].isspace()
def end_whitespace(s, i):
return not s[i].isspace()
def start_number(s, i):
return s[i].isdigit()
def end_number(s, i):
return not s[i].isdigit()
def start_identifier(s, i):
return s[i].isalpha() or s[i] == '_' or s[i] == '$'
def end_identifier(s, i):
return not (s[i].isalnum() or s[i] == '_')
def start_string(s, i):
return s[i] == '"' or s[i] == "'"
def always_true(s, i):
return True
while i < len(sql):
start = i
if start_multi_line_comment(sql, i):
i += 1
end_checker = end_multi_line_comment # depends on [control=['if'], data=[]]
elif start_single_line_comment(sql, i):
i += 1
end_checker = end_single_line_comment # depends on [control=['if'], data=[]]
elif start_whitespace(sql, i):
end_checker = end_whitespace # depends on [control=['if'], data=[]]
elif start_identifier(sql, i):
end_checker = end_identifier # depends on [control=['if'], data=[]]
elif start_number(sql, i):
end_checker = end_number # depends on [control=['if'], data=[]]
elif start_string(sql, i):
# Special handling here as we need to check for escaped closing quotes.
quote = sql[i]
end_checker = always_true
i += 1
while i < len(sql) and sql[i] != quote:
i += 2 if sql[i] == '\\' else 1 # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
# We return single characters for everything else
end_checker = always_true
i += 1
while i < len(sql) and (not end_checker(sql, i)):
i += 1 # depends on [control=['while'], data=[]]
yield sql[start:i] # depends on [control=['while'], data=['i']] |
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', limit_area=None,
downcast=None, **kwargs):
"""
Interpolate values according to different methods.
.. versionadded:: 0.18.1
"""
result = self._upsample(None)
return result.interpolate(method=method, axis=axis, limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast, **kwargs) | def function[interpolate, parameter[self, method, axis, limit, inplace, limit_direction, limit_area, downcast]]:
constant[
Interpolate values according to different methods.
.. versionadded:: 0.18.1
]
variable[result] assign[=] call[name[self]._upsample, parameter[constant[None]]]
return[call[name[result].interpolate, parameter[]]] | keyword[def] identifier[interpolate] ( identifier[self] , identifier[method] = literal[string] , identifier[axis] = literal[int] , identifier[limit] = keyword[None] , identifier[inplace] = keyword[False] ,
identifier[limit_direction] = literal[string] , identifier[limit_area] = keyword[None] ,
identifier[downcast] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[result] = identifier[self] . identifier[_upsample] ( keyword[None] )
keyword[return] identifier[result] . identifier[interpolate] ( identifier[method] = identifier[method] , identifier[axis] = identifier[axis] , identifier[limit] = identifier[limit] ,
identifier[inplace] = identifier[inplace] ,
identifier[limit_direction] = identifier[limit_direction] ,
identifier[limit_area] = identifier[limit_area] ,
identifier[downcast] = identifier[downcast] ,** identifier[kwargs] ) | def interpolate(self, method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None, **kwargs):
"""
Interpolate values according to different methods.
.. versionadded:: 0.18.1
"""
result = self._upsample(None)
return result.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs) |
def generate_view_data(self):
    """Populate ``self.view_data`` with the help-screen lines.

    Header lines (version, psutil version, configuration file) are set
    first, then every hotkey entry is rendered from a lookup table, and
    finally the ENTER hint is added as a plain string.
    """
    self.view_data['version'] = '{} {}'.format('Glances', __version__)
    self.view_data['psutil_version'] = ' with psutil {}'.format(psutil_version)
    try:
        self.view_data['configuration_file'] = 'Configuration file: {}'.format(
            self.config.loaded_config_file)
    except AttributeError:
        # No configuration file loaded on this instance; skip the line.
        pass
    msg_col = ' {0:1} {1:35}'
    msg_col2 = ' {0:1} {1:35}'
    # One row per help entry: (view_data key, template, hotkey, description).
    # The templates are kept separate to mirror the two help columns.
    entries = (
        ('sort_auto', msg_col, 'a', 'Sort processes automatically'),
        ('sort_network', msg_col2, 'b', 'Bytes or bits for network I/O'),
        ('sort_cpu', msg_col, 'c', 'Sort processes by CPU%'),
        ('show_hide_alert', msg_col2, 'l', 'Show/hide alert logs'),
        ('sort_mem', msg_col, 'm', 'Sort processes by MEM%'),
        ('sort_user', msg_col, 'u', 'Sort processes by USER'),
        ('delete_warning_alerts', msg_col2, 'w', 'Delete warning alerts'),
        ('sort_proc', msg_col, 'p', 'Sort processes by name'),
        ('delete_warning_critical_alerts', msg_col2, 'x', 'Delete warning and critical alerts'),
        ('sort_io', msg_col, 'i', 'Sort processes by I/O rate'),
        ('percpu', msg_col2, '1', 'Global CPU or per-CPU stats'),
        ('sort_cpu_times', msg_col, 't', 'Sort processes by TIME'),
        ('show_hide_help', msg_col2, 'h', 'Show/hide this help screen'),
        ('show_hide_diskio', msg_col, 'd', 'Show/hide disk I/O stats'),
        ('show_hide_irq', msg_col2, 'Q', 'Show/hide IRQ stats'),
        ('view_network_io_combination', msg_col2, 'T', 'View network I/O as combination'),
        ('show_hide_filesystem', msg_col, 'f', 'Show/hide filesystem stats'),
        ('view_cumulative_network', msg_col2, 'U', 'View cumulative network I/O'),
        ('show_hide_network', msg_col, 'n', 'Show/hide network stats'),
        ('show_hide_filesytem_freespace', msg_col2, 'F', 'Show filesystem free space'),
        ('show_hide_sensors', msg_col, 's', 'Show/hide sensors stats'),
        ('generate_graphs', msg_col2, 'g', 'Generate graphs for current history'),
        ('show_hide_left_sidebar', msg_col, '2', 'Show/hide left sidebar'),
        ('reset_history', msg_col2, 'r', 'Reset history'),
        ('enable_disable_process_stats', msg_col, 'z', 'Enable/disable processes stats'),
        ('quit', msg_col2, 'q', 'Quit (Esc and Ctrl-C also work)'),
        ('enable_disable_top_extends_stats', msg_col, 'e', 'Enable/disable top extended stats'),
        ('enable_disable_short_processname', msg_col, '/', 'Enable/disable short processes name'),
        ('enable_disable_irix', msg_col, '0', 'Enable/disable Irix process CPU'),
        ('enable_disable_docker', msg_col2, 'D', 'Enable/disable Docker stats'),
        ('enable_disable_quick_look', msg_col, '3', 'Enable/disable quick look plugin'),
        ('show_hide_ip', msg_col2, 'I', 'Show/hide IP module'),
        ('diskio_iops', msg_col2, 'B', 'Count/rate for Disk I/O'),
        ('show_hide_top_menu', msg_col2, '5', 'Show/hide top menu (QL, CPU, MEM, SWAP and LOAD)'),
        ('enable_disable_gpu', msg_col, 'G', 'Enable/disable gpu plugin'),
        ('enable_disable_mean_gpu', msg_col2, '6', 'Enable/disable mean gpu'),
    )
    for key, template, hotkey, description in entries:
        self.view_data[key] = template.format(hotkey, description)
    self.view_data['edit_pattern_filter'] = 'ENTER: Edit the process filter pattern'
constant[Generate the views.]
call[name[self].view_data][constant[version]] assign[=] call[constant[{} {}].format, parameter[constant[Glances], name[__version__]]]
call[name[self].view_data][constant[psutil_version]] assign[=] call[constant[ with psutil {}].format, parameter[name[psutil_version]]]
<ast.Try object at 0x7da1b1c3f2e0>
variable[msg_col] assign[=] constant[ {0:1} {1:35}]
variable[msg_col2] assign[=] constant[ {0:1} {1:35}]
call[name[self].view_data][constant[sort_auto]] assign[=] call[name[msg_col].format, parameter[constant[a], constant[Sort processes automatically]]]
call[name[self].view_data][constant[sort_network]] assign[=] call[name[msg_col2].format, parameter[constant[b], constant[Bytes or bits for network I/O]]]
call[name[self].view_data][constant[sort_cpu]] assign[=] call[name[msg_col].format, parameter[constant[c], constant[Sort processes by CPU%]]]
call[name[self].view_data][constant[show_hide_alert]] assign[=] call[name[msg_col2].format, parameter[constant[l], constant[Show/hide alert logs]]]
call[name[self].view_data][constant[sort_mem]] assign[=] call[name[msg_col].format, parameter[constant[m], constant[Sort processes by MEM%]]]
call[name[self].view_data][constant[sort_user]] assign[=] call[name[msg_col].format, parameter[constant[u], constant[Sort processes by USER]]]
call[name[self].view_data][constant[delete_warning_alerts]] assign[=] call[name[msg_col2].format, parameter[constant[w], constant[Delete warning alerts]]]
call[name[self].view_data][constant[sort_proc]] assign[=] call[name[msg_col].format, parameter[constant[p], constant[Sort processes by name]]]
call[name[self].view_data][constant[delete_warning_critical_alerts]] assign[=] call[name[msg_col2].format, parameter[constant[x], constant[Delete warning and critical alerts]]]
call[name[self].view_data][constant[sort_io]] assign[=] call[name[msg_col].format, parameter[constant[i], constant[Sort processes by I/O rate]]]
call[name[self].view_data][constant[percpu]] assign[=] call[name[msg_col2].format, parameter[constant[1], constant[Global CPU or per-CPU stats]]]
call[name[self].view_data][constant[sort_cpu_times]] assign[=] call[name[msg_col].format, parameter[constant[t], constant[Sort processes by TIME]]]
call[name[self].view_data][constant[show_hide_help]] assign[=] call[name[msg_col2].format, parameter[constant[h], constant[Show/hide this help screen]]]
call[name[self].view_data][constant[show_hide_diskio]] assign[=] call[name[msg_col].format, parameter[constant[d], constant[Show/hide disk I/O stats]]]
call[name[self].view_data][constant[show_hide_irq]] assign[=] call[name[msg_col2].format, parameter[constant[Q], constant[Show/hide IRQ stats]]]
call[name[self].view_data][constant[view_network_io_combination]] assign[=] call[name[msg_col2].format, parameter[constant[T], constant[View network I/O as combination]]]
call[name[self].view_data][constant[show_hide_filesystem]] assign[=] call[name[msg_col].format, parameter[constant[f], constant[Show/hide filesystem stats]]]
call[name[self].view_data][constant[view_cumulative_network]] assign[=] call[name[msg_col2].format, parameter[constant[U], constant[View cumulative network I/O]]]
call[name[self].view_data][constant[show_hide_network]] assign[=] call[name[msg_col].format, parameter[constant[n], constant[Show/hide network stats]]]
call[name[self].view_data][constant[show_hide_filesytem_freespace]] assign[=] call[name[msg_col2].format, parameter[constant[F], constant[Show filesystem free space]]]
call[name[self].view_data][constant[show_hide_sensors]] assign[=] call[name[msg_col].format, parameter[constant[s], constant[Show/hide sensors stats]]]
call[name[self].view_data][constant[generate_graphs]] assign[=] call[name[msg_col2].format, parameter[constant[g], constant[Generate graphs for current history]]]
call[name[self].view_data][constant[show_hide_left_sidebar]] assign[=] call[name[msg_col].format, parameter[constant[2], constant[Show/hide left sidebar]]]
call[name[self].view_data][constant[reset_history]] assign[=] call[name[msg_col2].format, parameter[constant[r], constant[Reset history]]]
call[name[self].view_data][constant[enable_disable_process_stats]] assign[=] call[name[msg_col].format, parameter[constant[z], constant[Enable/disable processes stats]]]
call[name[self].view_data][constant[quit]] assign[=] call[name[msg_col2].format, parameter[constant[q], constant[Quit (Esc and Ctrl-C also work)]]]
call[name[self].view_data][constant[enable_disable_top_extends_stats]] assign[=] call[name[msg_col].format, parameter[constant[e], constant[Enable/disable top extended stats]]]
call[name[self].view_data][constant[enable_disable_short_processname]] assign[=] call[name[msg_col].format, parameter[constant[/], constant[Enable/disable short processes name]]]
call[name[self].view_data][constant[enable_disable_irix]] assign[=] call[name[msg_col].format, parameter[constant[0], constant[Enable/disable Irix process CPU]]]
call[name[self].view_data][constant[enable_disable_docker]] assign[=] call[name[msg_col2].format, parameter[constant[D], constant[Enable/disable Docker stats]]]
call[name[self].view_data][constant[enable_disable_quick_look]] assign[=] call[name[msg_col].format, parameter[constant[3], constant[Enable/disable quick look plugin]]]
call[name[self].view_data][constant[show_hide_ip]] assign[=] call[name[msg_col2].format, parameter[constant[I], constant[Show/hide IP module]]]
call[name[self].view_data][constant[diskio_iops]] assign[=] call[name[msg_col2].format, parameter[constant[B], constant[Count/rate for Disk I/O]]]
call[name[self].view_data][constant[show_hide_top_menu]] assign[=] call[name[msg_col2].format, parameter[constant[5], constant[Show/hide top menu (QL, CPU, MEM, SWAP and LOAD)]]]
call[name[self].view_data][constant[enable_disable_gpu]] assign[=] call[name[msg_col].format, parameter[constant[G], constant[Enable/disable gpu plugin]]]
call[name[self].view_data][constant[enable_disable_mean_gpu]] assign[=] call[name[msg_col2].format, parameter[constant[6], constant[Enable/disable mean gpu]]]
call[name[self].view_data][constant[edit_pattern_filter]] assign[=] constant[ENTER: Edit the process filter pattern] | keyword[def] identifier[generate_view_data] ( identifier[self] ):
literal[string]
identifier[self] . identifier[view_data] [ literal[string] ]= literal[string] . identifier[format] ( literal[string] , identifier[__version__] )
identifier[self] . identifier[view_data] [ literal[string] ]= literal[string] . identifier[format] ( identifier[psutil_version] )
keyword[try] :
identifier[self] . identifier[view_data] [ literal[string] ]= literal[string] . identifier[format] ( identifier[self] . identifier[config] . identifier[loaded_config_file] )
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[msg_col] = literal[string]
identifier[msg_col2] = literal[string]
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= identifier[msg_col2] . identifier[format] ( literal[string] , literal[string] )
identifier[self] . identifier[view_data] [ literal[string] ]= literal[string] | def generate_view_data(self):
"""Generate the views."""
self.view_data['version'] = '{} {}'.format('Glances', __version__)
self.view_data['psutil_version'] = ' with psutil {}'.format(psutil_version)
try:
self.view_data['configuration_file'] = 'Configuration file: {}'.format(self.config.loaded_config_file) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
msg_col = ' {0:1} {1:35}'
msg_col2 = ' {0:1} {1:35}'
self.view_data['sort_auto'] = msg_col.format('a', 'Sort processes automatically')
self.view_data['sort_network'] = msg_col2.format('b', 'Bytes or bits for network I/O')
self.view_data['sort_cpu'] = msg_col.format('c', 'Sort processes by CPU%')
self.view_data['show_hide_alert'] = msg_col2.format('l', 'Show/hide alert logs')
self.view_data['sort_mem'] = msg_col.format('m', 'Sort processes by MEM%')
self.view_data['sort_user'] = msg_col.format('u', 'Sort processes by USER')
self.view_data['delete_warning_alerts'] = msg_col2.format('w', 'Delete warning alerts')
self.view_data['sort_proc'] = msg_col.format('p', 'Sort processes by name')
self.view_data['delete_warning_critical_alerts'] = msg_col2.format('x', 'Delete warning and critical alerts')
self.view_data['sort_io'] = msg_col.format('i', 'Sort processes by I/O rate')
self.view_data['percpu'] = msg_col2.format('1', 'Global CPU or per-CPU stats')
self.view_data['sort_cpu_times'] = msg_col.format('t', 'Sort processes by TIME')
self.view_data['show_hide_help'] = msg_col2.format('h', 'Show/hide this help screen')
self.view_data['show_hide_diskio'] = msg_col.format('d', 'Show/hide disk I/O stats')
self.view_data['show_hide_irq'] = msg_col2.format('Q', 'Show/hide IRQ stats')
self.view_data['view_network_io_combination'] = msg_col2.format('T', 'View network I/O as combination')
self.view_data['show_hide_filesystem'] = msg_col.format('f', 'Show/hide filesystem stats')
self.view_data['view_cumulative_network'] = msg_col2.format('U', 'View cumulative network I/O')
self.view_data['show_hide_network'] = msg_col.format('n', 'Show/hide network stats')
self.view_data['show_hide_filesytem_freespace'] = msg_col2.format('F', 'Show filesystem free space')
self.view_data['show_hide_sensors'] = msg_col.format('s', 'Show/hide sensors stats')
self.view_data['generate_graphs'] = msg_col2.format('g', 'Generate graphs for current history')
self.view_data['show_hide_left_sidebar'] = msg_col.format('2', 'Show/hide left sidebar')
self.view_data['reset_history'] = msg_col2.format('r', 'Reset history')
self.view_data['enable_disable_process_stats'] = msg_col.format('z', 'Enable/disable processes stats')
self.view_data['quit'] = msg_col2.format('q', 'Quit (Esc and Ctrl-C also work)')
self.view_data['enable_disable_top_extends_stats'] = msg_col.format('e', 'Enable/disable top extended stats')
self.view_data['enable_disable_short_processname'] = msg_col.format('/', 'Enable/disable short processes name')
self.view_data['enable_disable_irix'] = msg_col.format('0', 'Enable/disable Irix process CPU')
self.view_data['enable_disable_docker'] = msg_col2.format('D', 'Enable/disable Docker stats')
self.view_data['enable_disable_quick_look'] = msg_col.format('3', 'Enable/disable quick look plugin')
self.view_data['show_hide_ip'] = msg_col2.format('I', 'Show/hide IP module')
self.view_data['diskio_iops'] = msg_col2.format('B', 'Count/rate for Disk I/O')
self.view_data['show_hide_top_menu'] = msg_col2.format('5', 'Show/hide top menu (QL, CPU, MEM, SWAP and LOAD)')
self.view_data['enable_disable_gpu'] = msg_col.format('G', 'Enable/disable gpu plugin')
self.view_data['enable_disable_mean_gpu'] = msg_col2.format('6', 'Enable/disable mean gpu')
self.view_data['edit_pattern_filter'] = 'ENTER: Edit the process filter pattern' |
def _hm_send_msg(self, message):
    """This is the only interface to the serial connection."""
    try:
        # Push the raw frame onto the wire.
        self.conn.write(message)
    except serial.SerialTimeoutException:
        sys.stderr.write("Write timeout error: \n")
    # Wait for the controller's reply. The maximum reply length is 75
    # bytes in 5/2 mode or 159 bytes in 7-day mode, so request the
    # larger size and let the read return whatever arrives.
    return list(self.conn.read(159))
constant[This is the only interface to the serial connection.]
<ast.Try object at 0x7da18eb54c70>
variable[byteread] assign[=] call[name[self].conn.read, parameter[constant[159]]]
variable[datal] assign[=] call[name[list], parameter[name[byteread]]]
return[name[datal]] | keyword[def] identifier[_hm_send_msg] ( identifier[self] , identifier[message] ):
literal[string]
keyword[try] :
identifier[serial_message] = identifier[message]
identifier[self] . identifier[conn] . identifier[write] ( identifier[serial_message] )
keyword[except] identifier[serial] . identifier[SerialTimeoutException] :
identifier[serror] = literal[string]
identifier[sys] . identifier[stderr] . identifier[write] ( identifier[serror] )
identifier[byteread] = identifier[self] . identifier[conn] . identifier[read] ( literal[int] )
identifier[datal] = identifier[list] ( identifier[byteread] )
keyword[return] identifier[datal] | def _hm_send_msg(self, message):
"""This is the only interface to the serial connection."""
try:
serial_message = message
self.conn.write(serial_message) # Write a string # depends on [control=['try'], data=[]]
except serial.SerialTimeoutException:
serror = 'Write timeout error: \n'
sys.stderr.write(serror) # depends on [control=['except'], data=[]] # Now wait for reply
byteread = self.conn.read(159) # NB max return is 75 in 5/2 mode or 159 in 7day mode
datal = list(byteread)
return datal |
def filter_instance(self, inst, plist):
    """Remove properties from an instance that aren't in the PropertyList

    inst -- The pywbem.CIMInstance
    plist -- The property List, or None. The list items must be all
        lowercase.
    """
    if plist is None:
        # No PropertyList supplied: keep every property untouched.
        return
    # Iterate over a snapshot of the property names: deleting from the
    # dict while iterating its live keys() view raises RuntimeError on
    # Python 3.
    for pname in list(inst.properties.keys()):
        if pname.lower() not in plist and pname:
            # Never strip key properties -- they are required to keep
            # the instance path usable.
            if inst.path is not None and pname in inst.path.keybindings:
                continue
            del inst.properties[pname]
constant[Remove properties from an instance that aren't in the PropertyList
inst -- The pywbem.CIMInstance
plist -- The property List, or None. The list items must be all
lowercase.
]
if compare[name[plist] is_not constant[None]] begin[:]
for taget[name[pname]] in starred[call[name[inst].properties.keys, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0e9dde0> begin[:]
if <ast.BoolOp object at 0x7da1b0e9f340> begin[:]
continue
<ast.Delete object at 0x7da204962f80> | keyword[def] identifier[filter_instance] ( identifier[self] , identifier[inst] , identifier[plist] ):
literal[string]
keyword[if] identifier[plist] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[pname] keyword[in] identifier[inst] . identifier[properties] . identifier[keys] ():
keyword[if] identifier[pname] . identifier[lower] () keyword[not] keyword[in] identifier[plist] keyword[and] identifier[pname] :
keyword[if] identifier[inst] . identifier[path] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pname] keyword[in] identifier[inst] . identifier[path] . identifier[keybindings] :
keyword[continue]
keyword[del] identifier[inst] . identifier[properties] [ identifier[pname] ] | def filter_instance(self, inst, plist):
"""Remove properties from an instance that aren't in the PropertyList
inst -- The pywbem.CIMInstance
plist -- The property List, or None. The list items must be all
lowercase.
"""
if plist is not None:
for pname in inst.properties.keys():
if pname.lower() not in plist and pname:
if inst.path is not None and pname in inst.path.keybindings:
continue # depends on [control=['if'], data=[]]
del inst.properties[pname] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pname']] # depends on [control=['if'], data=['plist']] |
def relpath(self, path, start=None):
    """We mostly rely on the native implementation and adapt the
    path separator."""
    if not path:
        raise ValueError("no path specified")
    path = make_string_path(path)
    start = self.filesystem.cwd if start is None else make_string_path(start)
    # Translate both paths to the native separator so the real
    # os.path.relpath can operate on them.
    native_sep = self._os_path.sep
    fake_sep = self.filesystem.path_separator
    alt_sep = self.filesystem.alternative_path_separator
    if alt_sep is not None:
        path = path.replace(alt_sep, native_sep)
        start = start.replace(alt_sep, native_sep)
    path = path.replace(fake_sep, native_sep)
    start = start.replace(fake_sep, native_sep)
    # Translate the native result back to the fake filesystem's separator.
    rel = self._os_path.relpath(path, start)
    return rel.replace(native_sep, fake_sep)
constant[We mostly rely on the native implementation and adapt the
path separator.]
if <ast.UnaryOp object at 0x7da2047ea740> begin[:]
<ast.Raise object at 0x7da2047ea770>
variable[path] assign[=] call[name[make_string_path], parameter[name[path]]]
if compare[name[start] is_not constant[None]] begin[:]
variable[start] assign[=] call[name[make_string_path], parameter[name[start]]]
if compare[name[self].filesystem.alternative_path_separator is_not constant[None]] begin[:]
variable[path] assign[=] call[name[path].replace, parameter[name[self].filesystem.alternative_path_separator, name[self]._os_path.sep]]
variable[start] assign[=] call[name[start].replace, parameter[name[self].filesystem.alternative_path_separator, name[self]._os_path.sep]]
variable[path] assign[=] call[name[path].replace, parameter[name[self].filesystem.path_separator, name[self]._os_path.sep]]
variable[start] assign[=] call[name[start].replace, parameter[name[self].filesystem.path_separator, name[self]._os_path.sep]]
variable[path] assign[=] call[name[self]._os_path.relpath, parameter[name[path], name[start]]]
return[call[name[path].replace, parameter[name[self]._os_path.sep, name[self].filesystem.path_separator]]] | keyword[def] identifier[relpath] ( identifier[self] , identifier[path] , identifier[start] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[path] = identifier[make_string_path] ( identifier[path] )
keyword[if] identifier[start] keyword[is] keyword[not] keyword[None] :
identifier[start] = identifier[make_string_path] ( identifier[start] )
keyword[else] :
identifier[start] = identifier[self] . identifier[filesystem] . identifier[cwd]
keyword[if] identifier[self] . identifier[filesystem] . identifier[alternative_path_separator] keyword[is] keyword[not] keyword[None] :
identifier[path] = identifier[path] . identifier[replace] ( identifier[self] . identifier[filesystem] . identifier[alternative_path_separator] ,
identifier[self] . identifier[_os_path] . identifier[sep] )
identifier[start] = identifier[start] . identifier[replace] ( identifier[self] . identifier[filesystem] . identifier[alternative_path_separator] ,
identifier[self] . identifier[_os_path] . identifier[sep] )
identifier[path] = identifier[path] . identifier[replace] ( identifier[self] . identifier[filesystem] . identifier[path_separator] , identifier[self] . identifier[_os_path] . identifier[sep] )
identifier[start] = identifier[start] . identifier[replace] (
identifier[self] . identifier[filesystem] . identifier[path_separator] , identifier[self] . identifier[_os_path] . identifier[sep] )
identifier[path] = identifier[self] . identifier[_os_path] . identifier[relpath] ( identifier[path] , identifier[start] )
keyword[return] identifier[path] . identifier[replace] ( identifier[self] . identifier[_os_path] . identifier[sep] , identifier[self] . identifier[filesystem] . identifier[path_separator] ) | def relpath(self, path, start=None):
"""We mostly rely on the native implementation and adapt the
path separator."""
if not path:
raise ValueError('no path specified') # depends on [control=['if'], data=[]]
path = make_string_path(path)
if start is not None:
start = make_string_path(start) # depends on [control=['if'], data=['start']]
else:
start = self.filesystem.cwd
if self.filesystem.alternative_path_separator is not None:
path = path.replace(self.filesystem.alternative_path_separator, self._os_path.sep)
start = start.replace(self.filesystem.alternative_path_separator, self._os_path.sep) # depends on [control=['if'], data=[]]
path = path.replace(self.filesystem.path_separator, self._os_path.sep)
start = start.replace(self.filesystem.path_separator, self._os_path.sep)
path = self._os_path.relpath(path, start)
return path.replace(self._os_path.sep, self.filesystem.path_separator) |
def reg_on_abort(self, callable_object, *args, **kwargs):
    """ Register a function/method to be called when execution is aborted"""
    # 'persistent' is consumed here; the rest of kwargs is forwarded to
    # the callback unchanged.
    is_persistent = kwargs.pop('persistent', False)
    abort_event = self._create_event(
        callable_object, 'abort', is_persistent, *args, **kwargs)
    self.abort_callbacks.append(abort_event)
    return abort_event
constant[ Register a function/method to be called when execution is aborted]
variable[persistent] assign[=] call[name[kwargs].pop, parameter[constant[persistent], constant[False]]]
variable[event] assign[=] call[name[self]._create_event, parameter[name[callable_object], constant[abort], name[persistent], <ast.Starred object at 0x7da20c76da50>]]
call[name[self].abort_callbacks.append, parameter[name[event]]]
return[name[event]] | keyword[def] identifier[reg_on_abort] ( identifier[self] , identifier[callable_object] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[persistent] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[event] = identifier[self] . identifier[_create_event] ( identifier[callable_object] , literal[string] , identifier[persistent] ,* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[abort_callbacks] . identifier[append] ( identifier[event] )
keyword[return] identifier[event] | def reg_on_abort(self, callable_object, *args, **kwargs):
""" Register a function/method to be called when execution is aborted"""
persistent = kwargs.pop('persistent', False)
event = self._create_event(callable_object, 'abort', persistent, *args, **kwargs)
self.abort_callbacks.append(event)
return event |
def cluster_sample5():
    "Start with wrong number of clusters."
    start_centers = [[0.0, 1.0], [0.0, 0.0]]
    # Run the same sample under both splitting criteria.
    for criterion in (splitting_type.BAYESIAN_INFORMATION_CRITERION,
                      splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH):
        template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE5,
                            criterion=criterion)
constant[Start with wrong number of clusters.]
variable[start_centers] assign[=] list[[<ast.List object at 0x7da18ede4ac0>, <ast.List object at 0x7da1b01d9420>]]
call[name[template_clustering], parameter[name[start_centers], name[SIMPLE_SAMPLES].SAMPLE_SIMPLE5]]
call[name[template_clustering], parameter[name[start_centers], name[SIMPLE_SAMPLES].SAMPLE_SIMPLE5]] | keyword[def] identifier[cluster_sample5] ():
literal[string]
identifier[start_centers] =[[ literal[int] , literal[int] ],[ literal[int] , literal[int] ]]
identifier[template_clustering] ( identifier[start_centers] , identifier[SIMPLE_SAMPLES] . identifier[SAMPLE_SIMPLE5] , identifier[criterion] = identifier[splitting_type] . identifier[BAYESIAN_INFORMATION_CRITERION] )
identifier[template_clustering] ( identifier[start_centers] , identifier[SIMPLE_SAMPLES] . identifier[SAMPLE_SIMPLE5] , identifier[criterion] = identifier[splitting_type] . identifier[MINIMUM_NOISELESS_DESCRIPTION_LENGTH] ) | def cluster_sample5():
"""Start with wrong number of clusters."""
start_centers = [[0.0, 1.0], [0.0, 0.0]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE5, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE5, criterion=splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH) |
def _get_vqa_v2_image_feature_dataset(
    directory, feature_url, feature_filename="mscoco_feat.tar.gz"):
  """Download (if needed) and extract the VQA V2 image-feature tarball.

  Args:
    directory: destination directory for the download and the extraction.
    feature_url: Google Drive URL of the feature tarball.
    feature_filename: local filename used for the downloaded archive.
  """
  archive_path = generator_utils.maybe_download_from_drive(
      directory, feature_filename, feature_url)
  # NOTE(review): extractall trusts member paths in the downloaded archive.
  with tarfile.open(archive_path, "r:gz") as archive:
    archive.extractall(directory)
constant[Extract the VQA V2 feature data set to directory unless it's there.]
variable[feature_file] assign[=] call[name[generator_utils].maybe_download_from_drive, parameter[name[directory], name[feature_filename], name[feature_url]]]
with call[name[tarfile].open, parameter[name[feature_file], constant[r:gz]]] begin[:]
call[name[feature_tar].extractall, parameter[name[directory]]] | keyword[def] identifier[_get_vqa_v2_image_feature_dataset] (
identifier[directory] , identifier[feature_url] , identifier[feature_filename] = literal[string] ):
literal[string]
identifier[feature_file] = identifier[generator_utils] . identifier[maybe_download_from_drive] (
identifier[directory] , identifier[feature_filename] , identifier[feature_url] )
keyword[with] identifier[tarfile] . identifier[open] ( identifier[feature_file] , literal[string] ) keyword[as] identifier[feature_tar] :
identifier[feature_tar] . identifier[extractall] ( identifier[directory] ) | def _get_vqa_v2_image_feature_dataset(directory, feature_url, feature_filename='mscoco_feat.tar.gz'):
"""Extract the VQA V2 feature data set to directory unless it's there."""
feature_file = generator_utils.maybe_download_from_drive(directory, feature_filename, feature_url)
with tarfile.open(feature_file, 'r:gz') as feature_tar:
feature_tar.extractall(directory) # depends on [control=['with'], data=['feature_tar']] |
def tweet_list_handler(request, tweet_list_builder, msg_prefix=""):
    """Generic handler for any intent that reads out a list of tweets.

    ``tweet_list_builder`` is a callable that takes the user's access token
    and returns the list of tweet strings to read out.
    """
    tweets = tweet_list_builder(request.access_token())
    print(len(tweets), 'tweets found')
    if not tweets:
        return alexa.create_response(
            message="Sorry, no tweets found, please try something else",
            end_session=False)
    # Queue the tweets for this user, then read out the first batch.
    twitter_cache.initialize_user_queue(user_id=request.access_token(),
                                        queue=tweets)
    spoken_batch = twitter_cache.user_queue(
        request.access_token()).read_out_next(MAX_RESPONSE_TWEETS)
    message = msg_prefix + spoken_batch + ", say 'next' to hear more, or reply to a tweet by number."
    return alexa.create_response(message=message, end_session=False)
constant[ This is a generic function to handle any intent that reads out a list of tweets]
variable[tweets] assign[=] call[name[tweet_list_builder], parameter[call[name[request].access_token, parameter[]]]]
call[name[print], parameter[call[name[len], parameter[name[tweets]]], constant[tweets found]]]
if name[tweets] begin[:]
call[name[twitter_cache].initialize_user_queue, parameter[]]
variable[text_to_read_out] assign[=] call[call[name[twitter_cache].user_queue, parameter[call[name[request].access_token, parameter[]]]].read_out_next, parameter[name[MAX_RESPONSE_TWEETS]]]
variable[message] assign[=] binary_operation[binary_operation[name[msg_prefix] + name[text_to_read_out]] + constant[, say 'next' to hear more, or reply to a tweet by number.]]
return[call[name[alexa].create_response, parameter[]]] | keyword[def] identifier[tweet_list_handler] ( identifier[request] , identifier[tweet_list_builder] , identifier[msg_prefix] = literal[string] ):
literal[string]
identifier[tweets] = identifier[tweet_list_builder] ( identifier[request] . identifier[access_token] ())
identifier[print] ( identifier[len] ( identifier[tweets] ), literal[string] )
keyword[if] identifier[tweets] :
identifier[twitter_cache] . identifier[initialize_user_queue] ( identifier[user_id] = identifier[request] . identifier[access_token] (),
identifier[queue] = identifier[tweets] )
identifier[text_to_read_out] = identifier[twitter_cache] . identifier[user_queue] ( identifier[request] . identifier[access_token] ()). identifier[read_out_next] ( identifier[MAX_RESPONSE_TWEETS] )
identifier[message] = identifier[msg_prefix] + identifier[text_to_read_out] + literal[string]
keyword[return] identifier[alexa] . identifier[create_response] ( identifier[message] = identifier[message] ,
identifier[end_session] = keyword[False] )
keyword[else] :
keyword[return] identifier[alexa] . identifier[create_response] ( identifier[message] = literal[string] ,
identifier[end_session] = keyword[False] ) | def tweet_list_handler(request, tweet_list_builder, msg_prefix=''):
""" This is a generic function to handle any intent that reads out a list of tweets"""
# tweet_list_builder is a function that takes a unique identifier and returns a list of things to say
tweets = tweet_list_builder(request.access_token())
print(len(tweets), 'tweets found')
if tweets:
twitter_cache.initialize_user_queue(user_id=request.access_token(), queue=tweets)
text_to_read_out = twitter_cache.user_queue(request.access_token()).read_out_next(MAX_RESPONSE_TWEETS)
message = msg_prefix + text_to_read_out + ", say 'next' to hear more, or reply to a tweet by number."
return alexa.create_response(message=message, end_session=False) # depends on [control=['if'], data=[]]
else:
return alexa.create_response(message='Sorry, no tweets found, please try something else', end_session=False) |
def or_(cls, *queries):
    """
    Build a new OR query from the given Query objects.

    :param queries: the sub-queries to combine
    :rtype: Query
    """
    if len(queries) < 2:
        raise ValueError('or_ need two queries at least')
    first_class = queries[0]._query_class._class_name
    # Every sub-query must target the same class as the first one.
    if any(q._query_class._class_name != first_class for q in queries):
        raise TypeError('All queries must be for the same class')
    combined = Query(first_class)
    combined._or_query(queries)
    return combined
constant[
根据传入的 Query 对象,构造一个新的 OR 查询。
:param queries: 需要构造的子查询列表
:rtype: Query
]
if compare[call[name[len], parameter[name[queries]]] less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da1b0ef9c30>
if <ast.UnaryOp object at 0x7da1b0efb0d0> begin[:]
<ast.Raise object at 0x7da1b0efaa10>
variable[query] assign[=] call[name[Query], parameter[call[name[queries]][constant[0]]._query_class._class_name]]
call[name[query]._or_query, parameter[name[queries]]]
return[name[query]] | keyword[def] identifier[or_] ( identifier[cls] ,* identifier[queries] ):
literal[string]
keyword[if] identifier[len] ( identifier[queries] )< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[all] ( identifier[x] . identifier[_query_class] . identifier[_class_name] == identifier[queries] [ literal[int] ]. identifier[_query_class] . identifier[_class_name] keyword[for] identifier[x] keyword[in] identifier[queries] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[query] = identifier[Query] ( identifier[queries] [ literal[int] ]. identifier[_query_class] . identifier[_class_name] )
identifier[query] . identifier[_or_query] ( identifier[queries] )
keyword[return] identifier[query] | def or_(cls, *queries):
"""
根据传入的 Query 对象,构造一个新的 OR 查询。
:param queries: 需要构造的子查询列表
:rtype: Query
"""
if len(queries) < 2:
raise ValueError('or_ need two queries at least') # depends on [control=['if'], data=[]]
if not all((x._query_class._class_name == queries[0]._query_class._class_name for x in queries)):
raise TypeError('All queries must be for the same class') # depends on [control=['if'], data=[]]
query = Query(queries[0]._query_class._class_name)
query._or_query(queries)
return query |
def stop_conditions(self, G, E, I, known_winners, stats):
    """
    Determines if the (G, E) state can be ended early.

    :param G: networkx DiGraph of the current representation of "locked in" edges in RP
    :param E: networkx DiGraph of the remaining edges not yet considered
    :param I: list of all nodes
    :param known_winners: collection (list or set) of currently known PUT-winners
    :param stats: Stats object containing runtime statistics
    :return: -1 if no stop condition met, otherwise the int id of the stop condition
    """
    in_deg = G.in_degree(I)
    possible_winners = [node for node, deg in in_deg if deg == 0]

    # Stop Condition 2: Pruning. Possible winners are a subset of known winners.
    # Normalize known_winners to a set: `set <= list` raises TypeError, and the
    # docstring allows a list here.
    if set(possible_winners) <= set(known_winners):
        stats.stop_condition_hits[2] += 1
        if self.debug_mode >= 2:
            print("Stop Condition 2: pruned")
        return 2

    # Stop Condition 3: exactly one node has indegree 0, so it must win here.
    if len(possible_winners) == 1:
        stats.stop_condition_hits[3] += 1
        if self.debug_mode >= 2:
            print("Stop Condition 3: one cand in degree 0")
        self.add_winners(G, I, known_winners, stats, possible_winners)
        return 3

    # Stop Condition 1: G U E is acyclic.
    temp_G = nx.compose(G, E)
    if nx.is_directed_acyclic_graph(temp_G):
        stats.stop_condition_hits[1] += 1
        if self.debug_mode >= 2:
            print("Stop Condition 1: acyclic")
        self.add_winners(G, I, known_winners, stats)
        return 1

    return -1
constant[
Determines if G, E state can be ended early
:param G: networkx DiGraph of the current representation of "locked in" edges in RP
:param E: networkx DiGraph of the remaining edges not yet considered
:param I: list of all nodes
:param known_winners: list of currently known PUT-winners
:param stats: Stats object containing runtime statistics
:return: -1 if no stop condition met, otherwise returns the int of the stop condition
]
variable[in_deg] assign[=] call[name[G].in_degree, parameter[name[I]]]
variable[possible_winners] assign[=] <ast.ListComp object at 0x7da20c6c7730>
if compare[call[name[set], parameter[name[possible_winners]]] less_or_equal[<=] name[known_winners]] begin[:]
<ast.AugAssign object at 0x7da18f8126e0>
if compare[name[self].debug_mode greater_or_equal[>=] constant[2]] begin[:]
call[name[print], parameter[constant[Stop Condition 2: pruned]]]
return[constant[2]]
if compare[call[name[len], parameter[name[possible_winners]]] equal[==] constant[1]] begin[:]
<ast.AugAssign object at 0x7da20c6c4610>
if compare[name[self].debug_mode greater_or_equal[>=] constant[2]] begin[:]
call[name[print], parameter[constant[Stop Condition 3: one cand in degree 0]]]
call[name[self].add_winners, parameter[name[G], name[I], name[known_winners], name[stats], name[possible_winners]]]
return[constant[3]]
variable[temp_G] assign[=] call[name[nx].compose, parameter[name[G], name[E]]]
if compare[call[name[nx].is_directed_acyclic_graph, parameter[name[temp_G]]] is constant[True]] begin[:]
<ast.AugAssign object at 0x7da18f811ab0>
if compare[name[self].debug_mode greater_or_equal[>=] constant[2]] begin[:]
call[name[print], parameter[constant[Stop Condition 1: acyclic]]]
call[name[self].add_winners, parameter[name[G], name[I], name[known_winners], name[stats]]]
return[constant[1]]
return[<ast.UnaryOp object at 0x7da18f812860>] | keyword[def] identifier[stop_conditions] ( identifier[self] , identifier[G] , identifier[E] , identifier[I] , identifier[known_winners] , identifier[stats] ):
literal[string]
identifier[in_deg] = identifier[G] . identifier[in_degree] ( identifier[I] )
identifier[possible_winners] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[in_deg] keyword[if] identifier[x] [ literal[int] ]== literal[int] ]
keyword[if] identifier[set] ( identifier[possible_winners] )<= identifier[known_winners] :
identifier[stats] . identifier[stop_condition_hits] [ literal[int] ]+= literal[int]
keyword[if] identifier[self] . identifier[debug_mode] >= literal[int] :
identifier[print] ( literal[string] )
keyword[return] literal[int]
keyword[if] identifier[len] ( identifier[possible_winners] )== literal[int] :
identifier[stats] . identifier[stop_condition_hits] [ literal[int] ]+= literal[int]
keyword[if] identifier[self] . identifier[debug_mode] >= literal[int] :
identifier[print] ( literal[string] )
identifier[self] . identifier[add_winners] ( identifier[G] , identifier[I] , identifier[known_winners] , identifier[stats] , identifier[possible_winners] )
keyword[return] literal[int]
identifier[temp_G] = identifier[nx] . identifier[compose] ( identifier[G] , identifier[E] )
keyword[if] identifier[nx] . identifier[is_directed_acyclic_graph] ( identifier[temp_G] ) keyword[is] keyword[True] :
identifier[stats] . identifier[stop_condition_hits] [ literal[int] ]+= literal[int]
keyword[if] identifier[self] . identifier[debug_mode] >= literal[int] :
identifier[print] ( literal[string] )
identifier[self] . identifier[add_winners] ( identifier[G] , identifier[I] , identifier[known_winners] , identifier[stats] )
keyword[return] literal[int]
keyword[return] - literal[int] | def stop_conditions(self, G, E, I, known_winners, stats):
"""
Determines if G, E state can be ended early
:param G: networkx DiGraph of the current representation of "locked in" edges in RP
:param E: networkx DiGraph of the remaining edges not yet considered
:param I: list of all nodes
:param known_winners: list of currently known PUT-winners
:param stats: Stats object containing runtime statistics
:return: -1 if no stop condition met, otherwise returns the int of the stop condition
"""
in_deg = G.in_degree(I)
possible_winners = [x[0] for x in in_deg if x[1] == 0]
# Stop Condition 2: Pruning. Possible winners are subset of known winners
if set(possible_winners) <= known_winners:
stats.stop_condition_hits[2] += 1
if self.debug_mode >= 2:
print('Stop Condition 2: pruned') # depends on [control=['if'], data=[]]
return 2 # depends on [control=['if'], data=[]]
# Stop Condition 3: Exactly one node has indegree 0
if len(possible_winners) == 1:
stats.stop_condition_hits[3] += 1
if self.debug_mode >= 2:
print('Stop Condition 3: one cand in degree 0') # depends on [control=['if'], data=[]]
self.add_winners(G, I, known_winners, stats, possible_winners)
return 3 # depends on [control=['if'], data=[]]
# Stop Condition 1: G U E is acyclic
temp_G = nx.compose(G, E)
if nx.is_directed_acyclic_graph(temp_G) is True:
stats.stop_condition_hits[1] += 1
if self.debug_mode >= 2:
print('Stop Condition 1: acyclic') # depends on [control=['if'], data=[]]
self.add_winners(G, I, known_winners, stats)
return 1 # depends on [control=['if'], data=[]]
return -1 |
def identifiers(self, identifiers):
    """
    Store an IdentifierCollection (or None) on this subject.

    :type identifiers: subject_abcs.IdentifierCollection
    :raises ValueError: if a non-None value of any other type is given
    """
    # Guard clause: reject anything that is neither a collection nor None.
    if not (isinstance(identifiers, subject_abcs.IdentifierCollection) or
            identifiers is None):
        raise ValueError('must use IdentifierCollection')
    self._identifiers = identifiers
constant[
:type identifiers: subject_abcs.IdentifierCollection
]
if <ast.BoolOp object at 0x7da20c76c1c0> begin[:]
name[self]._identifiers assign[=] name[identifiers] | keyword[def] identifier[identifiers] ( identifier[self] , identifier[identifiers] ):
literal[string]
keyword[if] ( identifier[isinstance] ( identifier[identifiers] , identifier[subject_abcs] . identifier[IdentifierCollection] ) keyword[or]
identifier[identifiers] keyword[is] keyword[None] ):
identifier[self] . identifier[_identifiers] = identifier[identifiers]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def identifiers(self, identifiers):
"""
:type identifiers: subject_abcs.IdentifierCollection
"""
if isinstance(identifiers, subject_abcs.IdentifierCollection) or identifiers is None:
self._identifiers = identifiers # depends on [control=['if'], data=[]]
else:
raise ValueError('must use IdentifierCollection') |
def join(self, network):
    """
    Join a zerotier network.

    :param network: network id to join
    :raises RuntimeError: if the zerotier client reports a non-success state
    :return: None
    """
    args = {'network': network}
    self._network_chk.check(args)

    response = self._client.raw('zerotier.join', args)
    result = response.get()

    if result.state != 'SUCCESS':
        # Interpolate stderr into the message; passing it as a second
        # positional argument to RuntimeError left '%s' unformatted.
        raise RuntimeError('failed to join zerotier network: %s' % result.stderr)
constant[
Join a zerotier network
:param network: network id to join
:return:
]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b04c88e0>], [<ast.Name object at 0x7da1b04c95d0>]]
call[name[self]._network_chk.check, parameter[name[args]]]
variable[response] assign[=] call[name[self]._client.raw, parameter[constant[zerotier.join], name[args]]]
variable[result] assign[=] call[name[response].get, parameter[]]
if compare[name[result].state not_equal[!=] constant[SUCCESS]] begin[:]
<ast.Raise object at 0x7da1b04c9f30> | keyword[def] identifier[join] ( identifier[self] , identifier[network] ):
literal[string]
identifier[args] ={ literal[string] : identifier[network] }
identifier[self] . identifier[_network_chk] . identifier[check] ( identifier[args] )
identifier[response] = identifier[self] . identifier[_client] . identifier[raw] ( literal[string] , identifier[args] )
identifier[result] = identifier[response] . identifier[get] ()
keyword[if] identifier[result] . identifier[state] != literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] , identifier[result] . identifier[stderr] ) | def join(self, network):
"""
Join a zerotier network
:param network: network id to join
:return:
"""
args = {'network': network}
self._network_chk.check(args)
response = self._client.raw('zerotier.join', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to join zerotier network: %s', result.stderr) # depends on [control=['if'], data=[]] |
def set_pubsub_channels(self, request, channels):
    """
    Initialize the channels used for publishing and subscribing messages
    through the message queue.

    :param request: handshake request; its path (minus WEBSOCKET_URL) names the facility
    :param channels: iterable of directives such as 'publish-user' or
        'subscribe-broadcast' selecting which audiences to wire up
    """
    facility = request.path_info.replace(settings.WEBSOCKET_URL, '', 1)

    def audience_for(action):
        # Build the audience kwargs for _get_message_channels from the
        # '<action>-<scope>' directives present in `channels`. Replaces
        # the error-prone legacy `cond and [SELF] or []` idiom with
        # explicit conditional expressions.
        return {
            'users': [SELF] if action + '-user' in channels else [],
            'groups': [SELF] if action + '-group' in channels else [],
            'sessions': [SELF] if action + '-session' in channels else [],
            'broadcast': action + '-broadcast' in channels,
        }

    # initialize publishers
    self._publishers = set()
    for key in self._get_message_channels(request=request, facility=facility,
                                          **audience_for('publish')):
        self._publishers.add(key)

    # initialize subscribers
    self._subscription = self._connection.pubsub()
    for key in self._get_message_channels(request=request, facility=facility,
                                          **audience_for('subscribe')):
        self._subscription.subscribe(key)
constant[
Initialize the channels used for publishing and subscribing messages through the message queue.
]
variable[facility] assign[=] call[name[request].path_info.replace, parameter[name[settings].WEBSOCKET_URL, constant[], constant[1]]]
variable[audience] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d50100>, <ast.Constant object at 0x7da1b1d51540>, <ast.Constant object at 0x7da1b1d51150>, <ast.Constant object at 0x7da1b1d53310>], [<ast.BoolOp object at 0x7da1b1d51ed0>, <ast.BoolOp object at 0x7da1b1d52fe0>, <ast.BoolOp object at 0x7da1b1d529b0>, <ast.Compare object at 0x7da20eb64670>]]
name[self]._publishers assign[=] call[name[set], parameter[]]
for taget[name[key]] in starred[call[name[self]._get_message_channels, parameter[]]] begin[:]
call[name[self]._publishers.add, parameter[name[key]]]
variable[audience] assign[=] dictionary[[<ast.Constant object at 0x7da204621f00>, <ast.Constant object at 0x7da204621f90>, <ast.Constant object at 0x7da2046232e0>, <ast.Constant object at 0x7da204623460>], [<ast.BoolOp object at 0x7da204622b30>, <ast.BoolOp object at 0x7da204621450>, <ast.BoolOp object at 0x7da204623820>, <ast.Compare object at 0x7da204623280>]]
name[self]._subscription assign[=] call[name[self]._connection.pubsub, parameter[]]
for taget[name[key]] in starred[call[name[self]._get_message_channels, parameter[]]] begin[:]
call[name[self]._subscription.subscribe, parameter[name[key]]] | keyword[def] identifier[set_pubsub_channels] ( identifier[self] , identifier[request] , identifier[channels] ):
literal[string]
identifier[facility] = identifier[request] . identifier[path_info] . identifier[replace] ( identifier[settings] . identifier[WEBSOCKET_URL] , literal[string] , literal[int] )
identifier[audience] ={
literal[string] : literal[string] keyword[in] identifier[channels] keyword[and] [ identifier[SELF] ] keyword[or] [],
literal[string] : literal[string] keyword[in] identifier[channels] keyword[and] [ identifier[SELF] ] keyword[or] [],
literal[string] : literal[string] keyword[in] identifier[channels] keyword[and] [ identifier[SELF] ] keyword[or] [],
literal[string] : literal[string] keyword[in] identifier[channels] ,
}
identifier[self] . identifier[_publishers] = identifier[set] ()
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_get_message_channels] ( identifier[request] = identifier[request] , identifier[facility] = identifier[facility] ,** identifier[audience] ):
identifier[self] . identifier[_publishers] . identifier[add] ( identifier[key] )
identifier[audience] ={
literal[string] : literal[string] keyword[in] identifier[channels] keyword[and] [ identifier[SELF] ] keyword[or] [],
literal[string] : literal[string] keyword[in] identifier[channels] keyword[and] [ identifier[SELF] ] keyword[or] [],
literal[string] : literal[string] keyword[in] identifier[channels] keyword[and] [ identifier[SELF] ] keyword[or] [],
literal[string] : literal[string] keyword[in] identifier[channels] ,
}
identifier[self] . identifier[_subscription] = identifier[self] . identifier[_connection] . identifier[pubsub] ()
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_get_message_channels] ( identifier[request] = identifier[request] , identifier[facility] = identifier[facility] ,** identifier[audience] ):
identifier[self] . identifier[_subscription] . identifier[subscribe] ( identifier[key] ) | def set_pubsub_channels(self, request, channels):
"""
Initialize the channels used for publishing and subscribing messages through the message queue.
"""
facility = request.path_info.replace(settings.WEBSOCKET_URL, '', 1)
# initialize publishers
audience = {'users': 'publish-user' in channels and [SELF] or [], 'groups': 'publish-group' in channels and [SELF] or [], 'sessions': 'publish-session' in channels and [SELF] or [], 'broadcast': 'publish-broadcast' in channels}
self._publishers = set()
for key in self._get_message_channels(request=request, facility=facility, **audience):
self._publishers.add(key) # depends on [control=['for'], data=['key']]
# initialize subscribers
audience = {'users': 'subscribe-user' in channels and [SELF] or [], 'groups': 'subscribe-group' in channels and [SELF] or [], 'sessions': 'subscribe-session' in channels and [SELF] or [], 'broadcast': 'subscribe-broadcast' in channels}
self._subscription = self._connection.pubsub()
for key in self._get_message_channels(request=request, facility=facility, **audience):
self._subscription.subscribe(key) # depends on [control=['for'], data=['key']] |
def showBeamlines(self):
    """Return a one-line summary of all defined beamlines.

    An element counts as a beamline when its definition mapping contains
    the key 'beamline'. Entries whose values do not support membership
    tests (e.g. None) are skipped.

    :return: formatted string '<count>  beamlines: id1;id2;...'
    """
    blidlist = []
    for k in self.all_elements:
        element = self.all_elements.get(k)
        try:
            if 'beamline' in element:
                blidlist.append(k)
        except TypeError:
            # Value is not a container (e.g. None) -- not a beamline.
            # Narrowed from a bare `except` that silently hid all errors.
            pass
    retstr = '{total:<3d}beamlines: {allbl}'.format(total=len(blidlist),
                                                    allbl=';'.join(blidlist))
    return retstr
constant[ show all defined beamlines
]
variable[cnt] assign[=] constant[0]
variable[blidlist] assign[=] list[[]]
for taget[name[k]] in starred[name[self].all_elements] begin[:]
<ast.Try object at 0x7da1b09bf0a0>
variable[retstr] assign[=] call[constant[{total:<3d}beamlines: {allbl}].format, parameter[]]
return[name[retstr]] | keyword[def] identifier[showBeamlines] ( identifier[self] ):
literal[string]
identifier[cnt] = literal[int]
identifier[blidlist] =[]
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[all_elements] :
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[all_elements] . identifier[get] ( identifier[k] ):
identifier[cnt] += literal[int]
identifier[blidlist] . identifier[append] ( identifier[k] )
keyword[except] :
keyword[pass]
identifier[retstr] = literal[string] . identifier[format] ( identifier[total] = identifier[cnt] ,
identifier[allbl] = literal[string] . identifier[join] ( identifier[blidlist] ))
keyword[return] identifier[retstr] | def showBeamlines(self):
""" show all defined beamlines
"""
cnt = 0
blidlist = []
for k in self.all_elements:
try:
if 'beamline' in self.all_elements.get(k):
cnt += 1
blidlist.append(k) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['k']]
retstr = '{total:<3d}beamlines: {allbl}'.format(total=cnt, allbl=';'.join(blidlist))
return retstr |
def patch_worker_run_task():
    """
    Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
    first task in the task. This information is required by the sandboxing mechanism
    """
    # Keep a reference to the unpatched method so the wrapper can delegate to it.
    _run_task = luigi.worker.Worker._run_task
    def run_task(self, task_id):
        # Tag the task with its worker's identity for the duration of the run.
        task = self._scheduled_tasks[task_id]
        task._worker_id = self._id
        task._worker_task = self._first_task
        try:
            _run_task(self, task_id)
        finally:
            # Always clear the tags, even if the task raised.
            task._worker_id = None
            task._worker_task = None
            # make worker disposable when sandboxed
            if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
                self._start_phasing_out()
    # Install the wrapper in place of the original method.
    luigi.worker.Worker._run_task = run_task
constant[
Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
first task in the task. This information is required by the sandboxing mechanism
]
variable[_run_task] assign[=] name[luigi].worker.Worker._run_task
def function[run_task, parameter[self, task_id]]:
variable[task] assign[=] call[name[self]._scheduled_tasks][name[task_id]]
name[task]._worker_id assign[=] name[self]._id
name[task]._worker_task assign[=] name[self]._first_task
<ast.Try object at 0x7da1b05efbb0>
if compare[call[name[os].getenv, parameter[constant[LAW_SANDBOX_SWITCHED]]] equal[==] constant[1]] begin[:]
call[name[self]._start_phasing_out, parameter[]]
name[luigi].worker.Worker._run_task assign[=] name[run_task] | keyword[def] identifier[patch_worker_run_task] ():
literal[string]
identifier[_run_task] = identifier[luigi] . identifier[worker] . identifier[Worker] . identifier[_run_task]
keyword[def] identifier[run_task] ( identifier[self] , identifier[task_id] ):
identifier[task] = identifier[self] . identifier[_scheduled_tasks] [ identifier[task_id] ]
identifier[task] . identifier[_worker_id] = identifier[self] . identifier[_id]
identifier[task] . identifier[_worker_task] = identifier[self] . identifier[_first_task]
keyword[try] :
identifier[_run_task] ( identifier[self] , identifier[task_id] )
keyword[finally] :
identifier[task] . identifier[_worker_id] = keyword[None]
identifier[task] . identifier[_worker_task] = keyword[None]
keyword[if] identifier[os] . identifier[getenv] ( literal[string] )== literal[string] :
identifier[self] . identifier[_start_phasing_out] ()
identifier[luigi] . identifier[worker] . identifier[Worker] . identifier[_run_task] = identifier[run_task] | def patch_worker_run_task():
"""
Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
first task in the task. This information is required by the sandboxing mechanism
"""
_run_task = luigi.worker.Worker._run_task
def run_task(self, task_id):
task = self._scheduled_tasks[task_id]
task._worker_id = self._id
task._worker_task = self._first_task
try:
_run_task(self, task_id) # depends on [control=['try'], data=[]]
finally:
task._worker_id = None
task._worker_task = None
# make worker disposable when sandboxed
if os.getenv('LAW_SANDBOX_SWITCHED') == '1':
self._start_phasing_out() # depends on [control=['if'], data=[]]
luigi.worker.Worker._run_task = run_task |
def _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None,
                                    least_to_most=False):
    """Return the words in `vocab` ordered by their distinctiveness score."""
    marginal_topic_dist = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
    scores = get_word_distinctiveness(topic_word_distrib, marginal_topic_dist)
    return _words_by_score(vocab, scores, least_to_most=least_to_most, n=n)
constant[Return words in `vocab` ordered by distinctiveness score.]
variable[p_t] assign[=] call[name[get_marginal_topic_distrib], parameter[name[doc_topic_distrib], name[doc_lengths]]]
variable[distinct] assign[=] call[name[get_word_distinctiveness], parameter[name[topic_word_distrib], name[p_t]]]
return[call[name[_words_by_score], parameter[name[vocab], name[distinct]]]] | keyword[def] identifier[_words_by_distinctiveness_score] ( identifier[vocab] , identifier[topic_word_distrib] , identifier[doc_topic_distrib] , identifier[doc_lengths] , identifier[n] = keyword[None] ,
identifier[least_to_most] = keyword[False] ):
literal[string]
identifier[p_t] = identifier[get_marginal_topic_distrib] ( identifier[doc_topic_distrib] , identifier[doc_lengths] )
identifier[distinct] = identifier[get_word_distinctiveness] ( identifier[topic_word_distrib] , identifier[p_t] )
keyword[return] identifier[_words_by_score] ( identifier[vocab] , identifier[distinct] , identifier[least_to_most] = identifier[least_to_most] , identifier[n] = identifier[n] ) | def _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None, least_to_most=False):
"""Return words in `vocab` ordered by distinctiveness score."""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
distinct = get_word_distinctiveness(topic_word_distrib, p_t)
return _words_by_score(vocab, distinct, least_to_most=least_to_most, n=n) |
def init(req, model):  # pylint: disable=unused-argument
    """ Determine the pagination preference by query parameter

    Numbers only, >=0, & each query param may only be
    specified once.

    :return: Paginator object
    """
    limit = req.get_param('page[limit]') or goldman.config.PAGE_LIMIT
    offset = req.get_param('page[offset]') or 0

    try:
        return Paginator(limit, offset)
    except ValueError:
        # Paginator rejects non-integer or negative values.
        raise InvalidQueryParams(
            detail='The page[\'limit\'] & page[\'offset\'] query '
                   'params may only be specified once each & must '
                   'both be an integer >= 0.',
            links='jsonapi.org/format/#fetching-pagination',
            parameter='page',
        )
constant[ Determine the pagination preference by query parameter
Numbers only, >=0, & each query param may only be
specified once.
:return: Paginator object
]
variable[limit] assign[=] <ast.BoolOp object at 0x7da204960070>
variable[offset] assign[=] <ast.BoolOp object at 0x7da204960e80>
<ast.Try object at 0x7da1b1320400> | keyword[def] identifier[init] ( identifier[req] , identifier[model] ):
literal[string]
identifier[limit] = identifier[req] . identifier[get_param] ( literal[string] ) keyword[or] identifier[goldman] . identifier[config] . identifier[PAGE_LIMIT]
identifier[offset] = identifier[req] . identifier[get_param] ( literal[string] ) keyword[or] literal[int]
keyword[try] :
keyword[return] identifier[Paginator] ( identifier[limit] , identifier[offset] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[InvalidQueryParams] (**{
literal[string] : literal[string]
literal[string]
literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}) | def init(req, model): # pylint: disable=unused-argument
' Determine the pagination preference by query parameter\n\n Numbers only, >=0, & each query param may only be\n specified once.\n\n :return: Paginator object\n '
limit = req.get_param('page[limit]') or goldman.config.PAGE_LIMIT
offset = req.get_param('page[offset]') or 0
try:
return Paginator(limit, offset) # depends on [control=['try'], data=[]]
except ValueError:
raise InvalidQueryParams(**{'detail': "The page['limit'] & page['offset'] query params may only be specified once each & must both be an integer >= 0.", 'links': 'jsonapi.org/format/#fetching-pagination', 'parameter': 'page'}) # depends on [control=['except'], data=[]] |
def string_escape(string, delimiter='"'):
    """Turn special characters into escape sequences in the provided string.

    Supports both byte strings and unicode (str) values properly; any other
    type raises Error.

    Example:
        >>> string_escape("a line\t")
        '"a line\\t"'
    """
    # NOTE(fix): the original checked `isinstance(string, str)` twice, making
    # the second branch unreachable, and used the Python-2-only
    # "string-escape" codec; `str(bytes)` also produced a "b'...'" repr.
    if isinstance(string, bytes):
        # latin-1 maps each byte to the code point of the same value, so
        # unicode_escape then renders non-printables as \xNN sequences.
        escaped = string.decode("latin-1").encode("unicode_escape").decode("ascii")
    elif isinstance(string, str):
        escaped = string.encode("unicode_escape").decode("ascii")
    else:
        raise Error("Unexpected string type.")
    return delimiter + escape_quotes(escaped, delimiter) + delimiter
constant[Turns special characters into escape sequences in the provided string.
Supports both byte strings and unicode strings properly. Any other values
will produce None.
Example:
>>> string_escape("a line ")
"a line\t"
>>> string_escape(u"some fancy character: \u9999")
u"\u9999"
>>> string_escape(5)
None
]
if call[name[isinstance], parameter[name[string], name[str]]] begin[:]
variable[escaped] assign[=] call[name[string].encode, parameter[constant[string-escape]]]
return[binary_operation[binary_operation[name[delimiter] + call[name[escape_quotes], parameter[name[escaped], name[delimiter]]]] + name[delimiter]]] | keyword[def] identifier[string_escape] ( identifier[string] , identifier[delimiter] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[string] , identifier[str] ):
identifier[escaped] = identifier[string] . identifier[encode] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[string] , identifier[str] ):
identifier[escaped] = identifier[str] ( identifier[string] . identifier[encode] ( literal[string] ))
keyword[else] :
keyword[raise] identifier[Error] ( literal[string] )
keyword[return] identifier[delimiter] + identifier[escape_quotes] ( identifier[escaped] , identifier[delimiter] )+ identifier[delimiter] | def string_escape(string, delimiter='"'):
"""Turns special characters into escape sequences in the provided string.
Supports both byte strings and unicode strings properly. Any other values
will produce None.
Example:
>>> string_escape("a line ")
"a line\\t"
>>> string_escape(u"some fancy character: \\u9999")
u"\\u9999"
>>> string_escape(5)
None
"""
if isinstance(string, str):
escaped = string.encode('string-escape') # depends on [control=['if'], data=[]]
elif isinstance(string, str):
escaped = str(string.encode('unicode-escape')) # depends on [control=['if'], data=[]]
else:
raise Error('Unexpected string type.')
return delimiter + escape_quotes(escaped, delimiter) + delimiter |
def concurrence(state):
    """Calculate the concurrence of a two-qubit state.

    Args:
        state (np.array): a quantum state vector (length-4 array) or a
            density matrix (4x4 array).

    Returns:
        float: concurrence, in the range [0, 1].

    Raises:
        Exception: if the state does not describe exactly two qubits.
    """
    rho = np.array(state)
    if rho.ndim == 1:
        # Pure state vector: build the density matrix |psi><psi|.
        rho = outer(state)
    if len(state) != 4:
        # NOTE(fix): the original message was inverted ("more than two
        # qubits"); concurrence is defined only for two-qubit states.
        raise Exception("Concurrence is only defined for two qubits")
    # YY is sigma_y tensor sigma_y (the spin-flip operator) in matrix form.
    YY = np.fliplr(np.diag([-1, 1, 1, -1]))
    A = rho.dot(YY).dot(rho.conj()).dot(YY)
    # Eigenvalues of A are real and non-negative for a valid density matrix;
    # clamp tiny negative values from numerical noise before the sqrt.
    w = la.eigh(A, eigvals_only=True)
    w = np.sqrt(np.maximum(w, 0))
    # eigh returns eigenvalues in ascending order, so w[-1] is the largest.
    return max(0.0, w[-1] - np.sum(w[0:-1]))
constant[Calculate the concurrence.
Args:
state (np.array): a quantum state (1x4 array) or a density matrix (4x4
array)
Returns:
float: concurrence.
Raises:
Exception: if attempted on more than two qubits.
]
variable[rho] assign[=] call[name[np].array, parameter[name[state]]]
if compare[name[rho].ndim equal[==] constant[1]] begin[:]
variable[rho] assign[=] call[name[outer], parameter[name[state]]]
if compare[call[name[len], parameter[name[state]]] not_equal[!=] constant[4]] begin[:]
<ast.Raise object at 0x7da18f811900>
variable[YY] assign[=] call[name[np].fliplr, parameter[call[name[np].diag, parameter[list[[<ast.UnaryOp object at 0x7da18f811030>, <ast.Constant object at 0x7da18f810be0>, <ast.Constant object at 0x7da18f8117e0>, <ast.UnaryOp object at 0x7da18f812e90>]]]]]]
variable[A] assign[=] call[call[call[name[rho].dot, parameter[name[YY]]].dot, parameter[call[name[rho].conj, parameter[]]]].dot, parameter[name[YY]]]
variable[w] assign[=] call[name[la].eigh, parameter[name[A]]]
variable[w] assign[=] call[name[np].sqrt, parameter[call[name[np].maximum, parameter[name[w], constant[0]]]]]
return[call[name[max], parameter[constant[0.0], binary_operation[call[name[w]][<ast.UnaryOp object at 0x7da18f810d60>] - call[name[np].sum, parameter[call[name[w]][<ast.Slice object at 0x7da18f810b80>]]]]]]] | keyword[def] identifier[concurrence] ( identifier[state] ):
literal[string]
identifier[rho] = identifier[np] . identifier[array] ( identifier[state] )
keyword[if] identifier[rho] . identifier[ndim] == literal[int] :
identifier[rho] = identifier[outer] ( identifier[state] )
keyword[if] identifier[len] ( identifier[state] )!= literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[YY] = identifier[np] . identifier[fliplr] ( identifier[np] . identifier[diag] ([- literal[int] , literal[int] , literal[int] ,- literal[int] ]))
identifier[A] = identifier[rho] . identifier[dot] ( identifier[YY] ). identifier[dot] ( identifier[rho] . identifier[conj] ()). identifier[dot] ( identifier[YY] )
identifier[w] = identifier[la] . identifier[eigh] ( identifier[A] , identifier[eigvals_only] = keyword[True] )
identifier[w] = identifier[np] . identifier[sqrt] ( identifier[np] . identifier[maximum] ( identifier[w] , literal[int] ))
keyword[return] identifier[max] ( literal[int] , identifier[w] [- literal[int] ]- identifier[np] . identifier[sum] ( identifier[w] [ literal[int] :- literal[int] ])) | def concurrence(state):
"""Calculate the concurrence.
Args:
state (np.array): a quantum state (1x4 array) or a density matrix (4x4
array)
Returns:
float: concurrence.
Raises:
Exception: if attempted on more than two qubits.
"""
rho = np.array(state)
if rho.ndim == 1:
rho = outer(state) # depends on [control=['if'], data=[]]
if len(state) != 4:
raise Exception('Concurrence is only defined for more than two qubits') # depends on [control=['if'], data=[]]
YY = np.fliplr(np.diag([-1, 1, 1, -1]))
A = rho.dot(YY).dot(rho.conj()).dot(YY)
w = la.eigh(A, eigvals_only=True)
w = np.sqrt(np.maximum(w, 0))
return max(0.0, w[-1] - np.sum(w[0:-1])) |
def _send_offset_commit_request(self, offsets):
    """Commit offsets for the specified list of topics and partitions.

    This is a non-blocking call which returns a request future that can be
    polled in the case of a synchronous commit or ignored in the
    asynchronous case.

    Arguments:
        offsets (dict of {TopicPartition: OffsetAndMetadata}): what should
            be committed

    Returns:
        Future: indicating whether the commit was successful or not
    """
    # OffsetCommit requires brokers >= 0.8.1; validate key/value types up
    # front so protocol-encoding errors don't surface later.
    assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
    assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
    assert all(map(lambda v: isinstance(v, OffsetAndMetadata),
                   offsets.values()))
    if not offsets:
        log.debug('No offsets to commit')
        return Future().success(None)
    # Commits must go to the group coordinator; fail fast when it is
    # currently unknown so the caller can retry after coordinator lookup.
    node_id = self.coordinator()
    if node_id is None:
        return Future().failure(Errors.GroupCoordinatorNotAvailableError)
    # create the offset commit request
    # Regroup by topic: {topic: {partition: OffsetAndMetadata}} matches the
    # wire layout of the request.
    offset_data = collections.defaultdict(dict)
    for tp, offset in six.iteritems(offsets):
        offset_data[tp.topic][tp.partition] = offset
    if self._subscription.partitions_auto_assigned():
        generation = self.generation()
    else:
        # Manually-assigned partitions commit outside any group generation.
        generation = Generation.NO_GENERATION
    # if the generation is None, we are not part of an active group
    # (and we expect to be). The only thing we can do is fail the commit
    # and let the user rejoin the group in poll()
    if self.config['api_version'] >= (0, 9) and generation is None:
        return Future().failure(Errors.CommitFailedError())
    # Pick the request version the broker supports:
    #   v2 (0.9+)   carries group generation/member id + retention time,
    #   v1 (0.8.2+) adds a per-partition timestamp (-1 = use server time),
    #   v0 (0.8.1)  is the bare topic/partition/offset/metadata form.
    if self.config['api_version'] >= (0, 9):
        request = OffsetCommitRequest[2](
            self.group_id,
            generation.generation_id,
            generation.member_id,
            OffsetCommitRequest[2].DEFAULT_RETENTION_TIME,
            [(
                topic, [(
                    partition,
                    offset.offset,
                    offset.metadata
                ) for partition, offset in six.iteritems(partitions)]
            ) for topic, partitions in six.iteritems(offset_data)]
        )
    elif self.config['api_version'] >= (0, 8, 2):
        request = OffsetCommitRequest[1](
            self.group_id, -1, '',
            [(
                topic, [(
                    partition,
                    offset.offset,
                    -1,
                    offset.metadata
                ) for partition, offset in six.iteritems(partitions)]
            ) for topic, partitions in six.iteritems(offset_data)]
        )
    elif self.config['api_version'] >= (0, 8, 1):
        request = OffsetCommitRequest[0](
            self.group_id,
            [(
                topic, [(
                    partition,
                    offset.offset,
                    offset.metadata
                ) for partition, offset in six.iteritems(partitions)]
            ) for topic, partitions in six.iteritems(offset_data)]
        )
    log.debug("Sending offset-commit request with %s for group %s to %s",
              offsets, self.group_id, node_id)
    # Chain the network future into the one returned to the caller; the
    # commit time is captured now for the response handler's bookkeeping.
    future = Future()
    _f = self._client.send(node_id, request)
    _f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())
    _f.add_errback(self._failed_request, node_id, request, future)
    return future
constant[Commit offsets for the specified list of topics and partitions.
This is a non-blocking call which returns a request future that can be
polled in the case of a synchronous commit or ignored in the
asynchronous case.
Arguments:
offsets (dict of {TopicPartition: OffsetAndMetadata}): what should
be committed
Returns:
Future: indicating whether the commit was successful or not
]
assert[compare[call[name[self].config][constant[api_version]] greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da1b1c4a200>, <ast.Constant object at 0x7da1b1c490f0>, <ast.Constant object at 0x7da1b1c4a230>]]]]
assert[call[name[all], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b1c4a500>, name[offsets]]]]]]
assert[call[name[all], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b21852d0>, call[name[offsets].values, parameter[]]]]]]]
if <ast.UnaryOp object at 0x7da1b2178910> begin[:]
call[name[log].debug, parameter[constant[No offsets to commit]]]
return[call[call[name[Future], parameter[]].success, parameter[constant[None]]]]
variable[node_id] assign[=] call[name[self].coordinator, parameter[]]
if compare[name[node_id] is constant[None]] begin[:]
return[call[call[name[Future], parameter[]].failure, parameter[name[Errors].GroupCoordinatorNotAvailableError]]]
variable[offset_data] assign[=] call[name[collections].defaultdict, parameter[name[dict]]]
for taget[tuple[[<ast.Name object at 0x7da1b1c4b100>, <ast.Name object at 0x7da1b1c4ba90>]]] in starred[call[name[six].iteritems, parameter[name[offsets]]]] begin[:]
call[call[name[offset_data]][name[tp].topic]][name[tp].partition] assign[=] name[offset]
if call[name[self]._subscription.partitions_auto_assigned, parameter[]] begin[:]
variable[generation] assign[=] call[name[self].generation, parameter[]]
if <ast.BoolOp object at 0x7da1b1c49210> begin[:]
return[call[call[name[Future], parameter[]].failure, parameter[call[name[Errors].CommitFailedError, parameter[]]]]]
if compare[call[name[self].config][constant[api_version]] greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da1b1c49d20>, <ast.Constant object at 0x7da1b1c48970>]]] begin[:]
variable[request] assign[=] call[call[name[OffsetCommitRequest]][constant[2]], parameter[name[self].group_id, name[generation].generation_id, name[generation].member_id, call[name[OffsetCommitRequest]][constant[2]].DEFAULT_RETENTION_TIME, <ast.ListComp object at 0x7da1b1c4a020>]]
call[name[log].debug, parameter[constant[Sending offset-commit request with %s for group %s to %s], name[offsets], name[self].group_id, name[node_id]]]
variable[future] assign[=] call[name[Future], parameter[]]
variable[_f] assign[=] call[name[self]._client.send, parameter[name[node_id], name[request]]]
call[name[_f].add_callback, parameter[name[self]._handle_offset_commit_response, name[offsets], name[future], call[name[time].time, parameter[]]]]
call[name[_f].add_errback, parameter[name[self]._failed_request, name[node_id], name[request], name[future]]]
return[name[future]] | keyword[def] identifier[_send_offset_commit_request] ( identifier[self] , identifier[offsets] ):
literal[string]
keyword[assert] identifier[self] . identifier[config] [ literal[string] ]>=( literal[int] , literal[int] , literal[int] ), literal[string]
keyword[assert] identifier[all] ( identifier[map] ( keyword[lambda] identifier[k] : identifier[isinstance] ( identifier[k] , identifier[TopicPartition] ), identifier[offsets] ))
keyword[assert] identifier[all] ( identifier[map] ( keyword[lambda] identifier[v] : identifier[isinstance] ( identifier[v] , identifier[OffsetAndMetadata] ),
identifier[offsets] . identifier[values] ()))
keyword[if] keyword[not] identifier[offsets] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[return] identifier[Future] (). identifier[success] ( keyword[None] )
identifier[node_id] = identifier[self] . identifier[coordinator] ()
keyword[if] identifier[node_id] keyword[is] keyword[None] :
keyword[return] identifier[Future] (). identifier[failure] ( identifier[Errors] . identifier[GroupCoordinatorNotAvailableError] )
identifier[offset_data] = identifier[collections] . identifier[defaultdict] ( identifier[dict] )
keyword[for] identifier[tp] , identifier[offset] keyword[in] identifier[six] . identifier[iteritems] ( identifier[offsets] ):
identifier[offset_data] [ identifier[tp] . identifier[topic] ][ identifier[tp] . identifier[partition] ]= identifier[offset]
keyword[if] identifier[self] . identifier[_subscription] . identifier[partitions_auto_assigned] ():
identifier[generation] = identifier[self] . identifier[generation] ()
keyword[else] :
identifier[generation] = identifier[Generation] . identifier[NO_GENERATION]
keyword[if] identifier[self] . identifier[config] [ literal[string] ]>=( literal[int] , literal[int] ) keyword[and] identifier[generation] keyword[is] keyword[None] :
keyword[return] identifier[Future] (). identifier[failure] ( identifier[Errors] . identifier[CommitFailedError] ())
keyword[if] identifier[self] . identifier[config] [ literal[string] ]>=( literal[int] , literal[int] ):
identifier[request] = identifier[OffsetCommitRequest] [ literal[int] ](
identifier[self] . identifier[group_id] ,
identifier[generation] . identifier[generation_id] ,
identifier[generation] . identifier[member_id] ,
identifier[OffsetCommitRequest] [ literal[int] ]. identifier[DEFAULT_RETENTION_TIME] ,
[(
identifier[topic] ,[(
identifier[partition] ,
identifier[offset] . identifier[offset] ,
identifier[offset] . identifier[metadata]
) keyword[for] identifier[partition] , identifier[offset] keyword[in] identifier[six] . identifier[iteritems] ( identifier[partitions] )]
) keyword[for] identifier[topic] , identifier[partitions] keyword[in] identifier[six] . identifier[iteritems] ( identifier[offset_data] )]
)
keyword[elif] identifier[self] . identifier[config] [ literal[string] ]>=( literal[int] , literal[int] , literal[int] ):
identifier[request] = identifier[OffsetCommitRequest] [ literal[int] ](
identifier[self] . identifier[group_id] ,- literal[int] , literal[string] ,
[(
identifier[topic] ,[(
identifier[partition] ,
identifier[offset] . identifier[offset] ,
- literal[int] ,
identifier[offset] . identifier[metadata]
) keyword[for] identifier[partition] , identifier[offset] keyword[in] identifier[six] . identifier[iteritems] ( identifier[partitions] )]
) keyword[for] identifier[topic] , identifier[partitions] keyword[in] identifier[six] . identifier[iteritems] ( identifier[offset_data] )]
)
keyword[elif] identifier[self] . identifier[config] [ literal[string] ]>=( literal[int] , literal[int] , literal[int] ):
identifier[request] = identifier[OffsetCommitRequest] [ literal[int] ](
identifier[self] . identifier[group_id] ,
[(
identifier[topic] ,[(
identifier[partition] ,
identifier[offset] . identifier[offset] ,
identifier[offset] . identifier[metadata]
) keyword[for] identifier[partition] , identifier[offset] keyword[in] identifier[six] . identifier[iteritems] ( identifier[partitions] )]
) keyword[for] identifier[topic] , identifier[partitions] keyword[in] identifier[six] . identifier[iteritems] ( identifier[offset_data] )]
)
identifier[log] . identifier[debug] ( literal[string] ,
identifier[offsets] , identifier[self] . identifier[group_id] , identifier[node_id] )
identifier[future] = identifier[Future] ()
identifier[_f] = identifier[self] . identifier[_client] . identifier[send] ( identifier[node_id] , identifier[request] )
identifier[_f] . identifier[add_callback] ( identifier[self] . identifier[_handle_offset_commit_response] , identifier[offsets] , identifier[future] , identifier[time] . identifier[time] ())
identifier[_f] . identifier[add_errback] ( identifier[self] . identifier[_failed_request] , identifier[node_id] , identifier[request] , identifier[future] )
keyword[return] identifier[future] | def _send_offset_commit_request(self, offsets):
"""Commit offsets for the specified list of topics and partitions.
This is a non-blocking call which returns a request future that can be
polled in the case of a synchronous commit or ignored in the
asynchronous case.
Arguments:
offsets (dict of {TopicPartition: OffsetAndMetadata}): what should
be committed
Returns:
Future: indicating whether the commit was successful or not
"""
assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
assert all(map(lambda v: isinstance(v, OffsetAndMetadata), offsets.values()))
if not offsets:
log.debug('No offsets to commit')
return Future().success(None) # depends on [control=['if'], data=[]]
node_id = self.coordinator()
if node_id is None:
return Future().failure(Errors.GroupCoordinatorNotAvailableError) # depends on [control=['if'], data=[]]
# create the offset commit request
offset_data = collections.defaultdict(dict)
for (tp, offset) in six.iteritems(offsets):
offset_data[tp.topic][tp.partition] = offset # depends on [control=['for'], data=[]]
if self._subscription.partitions_auto_assigned():
generation = self.generation() # depends on [control=['if'], data=[]]
else:
generation = Generation.NO_GENERATION
# if the generation is None, we are not part of an active group
# (and we expect to be). The only thing we can do is fail the commit
# and let the user rejoin the group in poll()
if self.config['api_version'] >= (0, 9) and generation is None:
return Future().failure(Errors.CommitFailedError()) # depends on [control=['if'], data=[]]
if self.config['api_version'] >= (0, 9):
request = OffsetCommitRequest[2](self.group_id, generation.generation_id, generation.member_id, OffsetCommitRequest[2].DEFAULT_RETENTION_TIME, [(topic, [(partition, offset.offset, offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)]) # depends on [control=['if'], data=[]]
elif self.config['api_version'] >= (0, 8, 2):
request = OffsetCommitRequest[1](self.group_id, -1, '', [(topic, [(partition, offset.offset, -1, offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)]) # depends on [control=['if'], data=[]]
elif self.config['api_version'] >= (0, 8, 1):
request = OffsetCommitRequest[0](self.group_id, [(topic, [(partition, offset.offset, offset.metadata) for (partition, offset) in six.iteritems(partitions)]) for (topic, partitions) in six.iteritems(offset_data)]) # depends on [control=['if'], data=[]]
log.debug('Sending offset-commit request with %s for group %s to %s', offsets, self.group_id, node_id)
future = Future()
_f = self._client.send(node_id, request)
_f.add_callback(self._handle_offset_commit_response, offsets, future, time.time())
_f.add_errback(self._failed_request, node_id, request, future)
return future |
def create(self):
    """Launches a new server instance."""
    instance_name = "%s-%s" % (self.stack.name, self.name)
    self.server_attrs = self.consul.create_server(
        instance_name,
        self.disk_image_id,
        self.instance_type,
        self.ssh_key_name,
        tags=self.tags,
        availability_zone=self.availability_zone,
        timeout_s=self.launch_timeout_s,
        security_groups=self.security_groups,
        **self.provider_extras
    )
    # Give the provider a moment to settle before anything touches the
    # new instance.
    log.debug('Post launch delay: %d s', self.post_launch_delay_s)
    time.sleep(self.post_launch_delay_s)
constant[Launches a new server instance.]
name[self].server_attrs assign[=] call[name[self].consul.create_server, parameter[binary_operation[constant[%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18eb54fa0>, <ast.Attribute object at 0x7da18eb545e0>]]], name[self].disk_image_id, name[self].instance_type, name[self].ssh_key_name]]
call[name[log].debug, parameter[binary_operation[constant[Post launch delay: %d s] <ast.Mod object at 0x7da2590d6920> name[self].post_launch_delay_s]]]
call[name[time].sleep, parameter[name[self].post_launch_delay_s]] | keyword[def] identifier[create] ( identifier[self] ):
literal[string]
identifier[self] . identifier[server_attrs] = identifier[self] . identifier[consul] . identifier[create_server] (
literal[string] %( identifier[self] . identifier[stack] . identifier[name] , identifier[self] . identifier[name] ),
identifier[self] . identifier[disk_image_id] ,
identifier[self] . identifier[instance_type] ,
identifier[self] . identifier[ssh_key_name] ,
identifier[tags] = identifier[self] . identifier[tags] ,
identifier[availability_zone] = identifier[self] . identifier[availability_zone] ,
identifier[timeout_s] = identifier[self] . identifier[launch_timeout_s] ,
identifier[security_groups] = identifier[self] . identifier[security_groups] ,
** identifier[self] . identifier[provider_extras]
)
identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[post_launch_delay_s] )
identifier[time] . identifier[sleep] ( identifier[self] . identifier[post_launch_delay_s] ) | def create(self):
"""Launches a new server instance."""
self.server_attrs = self.consul.create_server('%s-%s' % (self.stack.name, self.name), self.disk_image_id, self.instance_type, self.ssh_key_name, tags=self.tags, availability_zone=self.availability_zone, timeout_s=self.launch_timeout_s, security_groups=self.security_groups, **self.provider_extras)
log.debug('Post launch delay: %d s' % self.post_launch_delay_s)
time.sleep(self.post_launch_delay_s) |
def download_software(name, version=None, synch=False, check=False):
    '''
    Ensures that a software version is downloaded.

    name: The name of the module function to execute.

    version(str): The software version to check. If this version is not already downloaded, it will attempt to download
    the file from Palo Alto.

    synch(bool): If true, after downloading the file it will be synched to its peer.

    check(bool): If true, the PANOS device will first attempt to pull the most recent software inventory list from Palo
    Alto.

    SLS Example:

    .. code-block:: yaml

        panos/version8.0.0:
            panos.download_software:
              - version: 8.0.0
              - synch: False
              - check: True

    '''
    ret = _default_ret(name)

    if check is True:
        __salt__['panos.check_software']()

    def _version_entries():
        # Fetch the device's software inventory; return the list of version
        # entries, or None when the structure is missing/malformed.
        versions = __salt__['panos.get_software_info']()['result']
        try:
            return versions['sw-updates']['versions']['entry']
        except (KeyError, TypeError):
            return None

    entries = _version_entries()
    if entries is None:
        ret.update({
            'comment': 'Software version is not found in the local software list.',
            'result': False
        })
        return ret

    # Short-circuit if the requested version is already on the device.
    for entry in entries:
        if entry['version'] == version and entry['downloaded'] == "yes":
            ret.update({
                'comment': 'Software version is already downloaded.',
                'result': True
            })
            return ret

    ret.update({
        'changes': __salt__['panos.download_software_version'](version=version, synch=synch)
    })

    # Re-read the inventory to confirm the download actually landed.
    entries = _version_entries()
    if entries is None:
        ret.update({
            'result': False
        })
        return ret

    for entry in entries:
        if entry['version'] == version and entry['downloaded'] == "yes":
            ret.update({
                'result': True
            })
            return ret

    # Version still not reported as downloaded: leave the default result.
    return ret
constant[
Ensures that a software version is downloaded.
name: The name of the module function to execute.
version(str): The software version to check. If this version is not already downloaded, it will attempt to download
the file from Palo Alto.
synch(bool): If true, after downloading the file it will be synched to its peer.
check(bool): If true, the PANOS device will first attempt to pull the most recent software inventory list from Palo
Alto.
SLS Example:
.. code-block:: yaml
panos/version8.0.0:
panos.download_software:
- version: 8.0.0
- synch: False
- check: True
]
variable[ret] assign[=] call[name[_default_ret], parameter[name[name]]]
if compare[name[check] is constant[True]] begin[:]
call[call[name[__salt__]][constant[panos.check_software]], parameter[]]
variable[versions] assign[=] call[call[call[name[__salt__]][constant[panos.get_software_info]], parameter[]]][constant[result]]
if <ast.BoolOp object at 0x7da18f58c220> begin[:]
call[name[ret].update, parameter[dictionary[[<ast.Constant object at 0x7da20c6c55a0>, <ast.Constant object at 0x7da20c6c4cd0>], [<ast.Constant object at 0x7da20c6c7cd0>, <ast.Constant object at 0x7da20c6c78e0>]]]]
return[name[ret]]
for taget[name[entry]] in starred[call[call[call[name[versions]][constant[sw-updates]]][constant[versions]]][constant[entry]]] begin[:]
if <ast.BoolOp object at 0x7da20c6c67d0> begin[:]
call[name[ret].update, parameter[dictionary[[<ast.Constant object at 0x7da20c6c6380>, <ast.Constant object at 0x7da20c6c4370>], [<ast.Constant object at 0x7da20c6c7850>, <ast.Constant object at 0x7da20c6c4580>]]]]
return[name[ret]]
call[name[ret].update, parameter[dictionary[[<ast.Constant object at 0x7da20c6c5180>], [<ast.Call object at 0x7da20c6c45b0>]]]]
variable[versions] assign[=] call[call[call[name[__salt__]][constant[panos.get_software_info]], parameter[]]][constant[result]]
if <ast.BoolOp object at 0x7da18bc73df0> begin[:]
call[name[ret].update, parameter[dictionary[[<ast.Constant object at 0x7da18bc73130>], [<ast.Constant object at 0x7da18bc72920>]]]]
return[name[ret]]
for taget[name[entry]] in starred[call[call[call[name[versions]][constant[sw-updates]]][constant[versions]]][constant[entry]]] begin[:]
if <ast.BoolOp object at 0x7da18bc72b30> begin[:]
call[name[ret].update, parameter[dictionary[[<ast.Constant object at 0x7da204345420>], [<ast.Constant object at 0x7da204344250>]]]]
return[name[ret]]
return[name[ret]] | keyword[def] identifier[download_software] ( identifier[name] , identifier[version] = keyword[None] , identifier[synch] = keyword[False] , identifier[check] = keyword[False] ):
literal[string]
identifier[ret] = identifier[_default_ret] ( identifier[name] )
keyword[if] identifier[check] keyword[is] keyword[True] :
identifier[__salt__] [ literal[string] ]()
identifier[versions] = identifier[__salt__] [ literal[string] ]()[ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[versions] keyword[or] literal[string] keyword[not] keyword[in] identifier[versions] [ literal[string] ] keyword[or] literal[string] keyword[not] keyword[in] identifier[versions] [ literal[string] ][ literal[string] ]:
identifier[ret] . identifier[update] ({
literal[string] : literal[string] ,
literal[string] : keyword[False]
})
keyword[return] identifier[ret]
keyword[for] identifier[entry] keyword[in] identifier[versions] [ literal[string] ][ literal[string] ][ literal[string] ]:
keyword[if] identifier[entry] [ literal[string] ]== identifier[version] keyword[and] identifier[entry] [ literal[string] ]== literal[string] :
identifier[ret] . identifier[update] ({
literal[string] : literal[string] ,
literal[string] : keyword[True]
})
keyword[return] identifier[ret]
identifier[ret] . identifier[update] ({
literal[string] : identifier[__salt__] [ literal[string] ]( identifier[version] = identifier[version] , identifier[synch] = identifier[synch] )
})
identifier[versions] = identifier[__salt__] [ literal[string] ]()[ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[versions] keyword[or] literal[string] keyword[not] keyword[in] identifier[versions] [ literal[string] ] keyword[or] literal[string] keyword[not] keyword[in] identifier[versions] [ literal[string] ][ literal[string] ]:
identifier[ret] . identifier[update] ({
literal[string] : keyword[False]
})
keyword[return] identifier[ret]
keyword[for] identifier[entry] keyword[in] identifier[versions] [ literal[string] ][ literal[string] ][ literal[string] ]:
keyword[if] identifier[entry] [ literal[string] ]== identifier[version] keyword[and] identifier[entry] [ literal[string] ]== literal[string] :
identifier[ret] . identifier[update] ({
literal[string] : keyword[True]
})
keyword[return] identifier[ret]
keyword[return] identifier[ret] | def download_software(name, version=None, synch=False, check=False):
"""
Ensures that a software version is downloaded.
name: The name of the module function to execute.
version(str): The software version to check. If this version is not already downloaded, it will attempt to download
the file from Palo Alto.
synch(bool): If true, after downloading the file it will be synched to its peer.
check(bool): If true, the PANOS device will first attempt to pull the most recent software inventory list from Palo
Alto.
SLS Example:
.. code-block:: yaml
panos/version8.0.0:
panos.download_software:
- version: 8.0.0
- synch: False
- check: True
"""
ret = _default_ret(name)
if check is True:
__salt__['panos.check_software']() # depends on [control=['if'], data=[]]
versions = __salt__['panos.get_software_info']()['result']
if 'sw-updates' not in versions or 'versions' not in versions['sw-updates'] or 'entry' not in versions['sw-updates']['versions']:
ret.update({'comment': 'Software version is not found in the local software list.', 'result': False})
return ret # depends on [control=['if'], data=[]]
for entry in versions['sw-updates']['versions']['entry']:
if entry['version'] == version and entry['downloaded'] == 'yes':
ret.update({'comment': 'Software version is already downloaded.', 'result': True}) # depends on [control=['if'], data=[]]
return ret # depends on [control=['for'], data=['entry']]
ret.update({'changes': __salt__['panos.download_software_version'](version=version, synch=synch)})
versions = __salt__['panos.get_software_info']()['result']
if 'sw-updates' not in versions or 'versions' not in versions['sw-updates'] or 'entry' not in versions['sw-updates']['versions']:
ret.update({'result': False})
return ret # depends on [control=['if'], data=[]]
for entry in versions['sw-updates']['versions']['entry']:
if entry['version'] == version and entry['downloaded'] == 'yes':
ret.update({'result': True}) # depends on [control=['if'], data=[]]
return ret # depends on [control=['for'], data=['entry']]
return ret |
def normalize_unicode(text):
    """
    Return *text* reduced to its closest ASCII equivalent via NFKD
    decomposition (non-ASCII code points are dropped).
    https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
    Non-text values pass through unchanged.
    """
    if not isinstance(text, six.text_type):
        return text
    decomposed = unicodedata.normalize('NFKD', text)
    return decomposed.encode('ascii', 'ignore').decode('utf8')
constant[
Normalize any unicode characters to ascii equivalent
https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
]
if call[name[isinstance], parameter[name[text], name[six].text_type]] begin[:]
return[call[call[call[name[unicodedata].normalize, parameter[constant[NFKD], name[text]]].encode, parameter[constant[ascii], constant[ignore]]].decode, parameter[constant[utf8]]]] | keyword[def] identifier[normalize_unicode] ( identifier[text] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[text] , identifier[six] . identifier[text_type] ):
keyword[return] identifier[unicodedata] . identifier[normalize] ( literal[string] , identifier[text] ). identifier[encode] ( literal[string] , literal[string] ). identifier[decode] ( literal[string] )
keyword[else] :
keyword[return] identifier[text] | def normalize_unicode(text):
"""
Normalize any unicode characters to ascii equivalent
https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
"""
if isinstance(text, six.text_type):
return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8') # depends on [control=['if'], data=[]]
else:
return text |
def try_until_succeed(_warning_period, func, *argv, **kwarg):
    """Call ``func(*argv, **kwarg)`` repeatedly until it returns without raising.

    Not suitable for a call that can never succeed: this loops forever.
    After every ``_warning_period`` attempts the most recent exception and
    the attempt count are printed, then retrying continues.

    :param _warning_period: positive int; number of attempts between warnings.
    :param func: the callable to retry.
    :param argv: positional arguments forwarded to ``func``.
    :param kwarg: keyword arguments forwarded to ``func``.
    :return: whatever ``func`` eventually returns.
    :raises Exception: if ``_warning_period`` is not a positive int.
    """
    if (not isinstance(_warning_period, int)) or (_warning_period < 1):
        raise Exception("'_warning_period' argument has to be int "
                        "and greater than 0")
    counter = 1
    # Retry forever; the only exit is a successful call of ``func``.
    while True:
        try:
            return func(*argv, **kwarg)
        except Exception as e:
            current_exception = e
            counter += 1
            if (counter % _warning_period) == 0:
                print("Warning: this is %sth try, current error is "
                      "%s" % (counter, repr(current_exception)))
"%s" % (counter, repr(current_exception))) | def function[try_until_succeed, parameter[_warning_period, func]]:
constant[Try a function until it successfully returns.
Print current exception every ``_warning_period`` times try.
**中文文档**
尝试一个函数直到其成功为止。不适用于会出现死循环的函数。
每隔``_warning_period``次失败, 就会打印当前异常和尝试次数。
]
if <ast.BoolOp object at 0x7da20c990e80> begin[:]
<ast.Raise object at 0x7da20c990ac0>
variable[counter] assign[=] constant[1]
while constant[1] begin[:]
<ast.Try object at 0x7da20c991d80> | keyword[def] identifier[try_until_succeed] ( identifier[_warning_period] , identifier[func] ,* identifier[argv] ,** identifier[kwarg] ):
literal[string]
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[_warning_period] , identifier[int] )) keyword[or] ( identifier[_warning_period] < literal[int] ):
keyword[raise] identifier[Exception] ( literal[string]
literal[string] )
identifier[counter] = literal[int]
keyword[while] literal[int] :
keyword[try] :
keyword[return] identifier[func] (* identifier[argv] ,** identifier[kwarg] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[current_exception] = identifier[e]
identifier[counter] += literal[int]
keyword[if] ( identifier[counter] % identifier[_warning_period] )== literal[int] :
identifier[print] ( literal[string]
literal[string] %( identifier[counter] , identifier[repr] ( identifier[current_exception] ))) | def try_until_succeed(_warning_period, func, *argv, **kwarg):
"""Try a function until it successfully returns.
Print current exception every ``_warning_period`` times try.
**中文文档**
尝试一个函数直到其成功为止。不适用于会出现死循环的函数。
每隔``_warning_period``次失败, 就会打印当前异常和尝试次数。
"""
if not isinstance(_warning_period, int) or _warning_period < 1:
raise Exception("'_warning_period' argument has to be int and greater than 0") # depends on [control=['if'], data=[]]
counter = 1
while 1:
try:
return func(*argv, **kwarg) # depends on [control=['try'], data=[]]
except Exception as e:
current_exception = e
counter += 1
if counter % _warning_period == 0:
print('Warning: this is %sth try, current error is %s' % (counter, repr(current_exception))) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]] |
def fill4(image, mask=None, iterations=1):
    """Fill 4-connected black pixels.

    x 1 x     x 1 x
    1 0 1 ->  1 1 1
    x 1 x     x 1 x

    Parameters
    ----------
    image : binary array
        Image whose isolated background pixels should be filled.
    mask : binary array, optional
        Only pixels where ``mask`` is True may change; pixels outside
        the mask keep their original value in the result.
    iterations : int
        Number of passes of the table lookup to apply.

    Returns
    -------
    array
        Copy of ``image`` with 4-connected holes filled.
    """
    global fill4_table
    if mask is None:
        masked_image = image
    else:
        # Mark everything outside the mask as foreground so the lookup
        # can never "fill" it; the original values are restored below.
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = True
    result = table_lookup(masked_image, fill4_table, True, iterations)
    if mask is not None:
        result[~mask] = image[~mask]
    return result
return result | def function[fill4, parameter[image, mask, iterations]]:
constant[Fill 4-connected black pixels
x 1 x x 1 x
1 0 1 -> 1 1 1
x 1 x x 1 x
]
<ast.Global object at 0x7da20e9606a0>
if compare[name[mask] is constant[None]] begin[:]
variable[masked_image] assign[=] name[image]
variable[result] assign[=] call[name[table_lookup], parameter[name[masked_image], name[fill4_table], constant[True], name[iterations]]]
if <ast.UnaryOp object at 0x7da20c6e4c70> begin[:]
call[name[result]][<ast.UnaryOp object at 0x7da20c6e78b0>] assign[=] call[name[image]][<ast.UnaryOp object at 0x7da20c6e4730>]
return[name[result]] | keyword[def] identifier[fill4] ( identifier[image] , identifier[mask] = keyword[None] , identifier[iterations] = literal[int] ):
literal[string]
keyword[global] identifier[fill4_table]
keyword[if] identifier[mask] keyword[is] keyword[None] :
identifier[masked_image] = identifier[image]
keyword[else] :
identifier[masked_image] = identifier[image] . identifier[astype] ( identifier[bool] ). identifier[copy] ()
identifier[masked_image] [~ identifier[mask] ]= keyword[True]
identifier[result] = identifier[table_lookup] ( identifier[masked_image] , identifier[fill4_table] , keyword[True] , identifier[iterations] )
keyword[if] keyword[not] identifier[mask] keyword[is] keyword[None] :
identifier[result] [~ identifier[mask] ]= identifier[image] [~ identifier[mask] ]
keyword[return] identifier[result] | def fill4(image, mask=None, iterations=1):
"""Fill 4-connected black pixels
x 1 x x 1 x
1 0 1 -> 1 1 1
x 1 x x 1 x
"""
global fill4_table
if mask is None:
masked_image = image # depends on [control=['if'], data=[]]
else:
masked_image = image.astype(bool).copy()
masked_image[~mask] = True
result = table_lookup(masked_image, fill4_table, True, iterations)
if not mask is None:
result[~mask] = image[~mask] # depends on [control=['if'], data=[]]
return result |
def reload(self, r=None, pr=None, timeout=None, basic_quorum=None,
           notfound_ok=None, head_only=False):
    """
    Reload the object from Riak. When this operation completes, the
    object could contain new metadata and a new value, if the object
    was updated in Riak since it was last retrieved.
    .. note:: Even if the key is not found in Riak, this will
       return a :class:`RiakObject`. Check the :attr:`exists`
       property to see if the key was found.
    .. note:: ``basic_quorum`` and ``notfound_ok`` are accepted here
       but are not forwarded to the underlying ``client.get`` call
       below — NOTE(review): confirm whether the client applies them
       through another mechanism.
    :param r: R-Value, wait for this many partitions to respond
        before returning to client.
    :type r: integer
    :param pr: PR-value, require this many primary partitions to
        be available before performing the read that
        precedes the put
    :type pr: integer
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param basic_quorum: whether to use the "basic quorum" policy
        for not-founds
    :type basic_quorum: bool
    :param notfound_ok: whether to treat not-found responses as successful
    :type notfound_ok: bool
    :param head_only: whether to fetch without value, so only metadata
        (only available on PB transport)
    :type head_only: bool
    :rtype: :class:`RiakObject`
    """
    # Delegates the fetch to the client, which mutates this object in
    # place; returning ``self`` allows fluent chaining.
    self.client.get(self, r=r, pr=pr, timeout=timeout, head_only=head_only)
    return self
constant[
Reload the object from Riak. When this operation completes, the
object could contain new metadata and a new value, if the object
was updated in Riak since it was last retrieved.
.. note:: Even if the key is not found in Riak, this will
return a :class:`RiakObject`. Check the :attr:`exists`
property to see if the key was found.
:param r: R-Value, wait for this many partitions to respond
before returning to client.
:type r: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: :class:`RiakObject`
]
call[name[self].client.get, parameter[name[self]]]
return[name[self]] | keyword[def] identifier[reload] ( identifier[self] , identifier[r] = keyword[None] , identifier[pr] = keyword[None] , identifier[timeout] = keyword[None] , identifier[basic_quorum] = keyword[None] ,
identifier[notfound_ok] = keyword[None] , identifier[head_only] = keyword[False] ):
literal[string]
identifier[self] . identifier[client] . identifier[get] ( identifier[self] , identifier[r] = identifier[r] , identifier[pr] = identifier[pr] , identifier[timeout] = identifier[timeout] , identifier[head_only] = identifier[head_only] )
keyword[return] identifier[self] | def reload(self, r=None, pr=None, timeout=None, basic_quorum=None, notfound_ok=None, head_only=False):
"""
Reload the object from Riak. When this operation completes, the
object could contain new metadata and a new value, if the object
was updated in Riak since it was last retrieved.
.. note:: Even if the key is not found in Riak, this will
return a :class:`RiakObject`. Check the :attr:`exists`
property to see if the key was found.
:param r: R-Value, wait for this many partitions to respond
before returning to client.
:type r: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: :class:`RiakObject`
"""
self.client.get(self, r=r, pr=pr, timeout=timeout, head_only=head_only)
return self |
def update_sma(self, step):
    """
    Compute the next semimajor axis value from the current one.

    The caller manages the sign and magnitude of ``step`` to support
    both modes: grow outwards and shrink inwards.

    Parameters
    ----------
    step : float
        Additive step (linear mode) or fractional step (geometric mode).

    Returns
    -------
    sma : float
        The new semimajor axis length.
    """
    if self.linear_growth:
        return self.sma + step
    return self.sma * (1. + step)
return sma | def function[update_sma, parameter[self, step]]:
constant[
Calculate an updated value for the semimajor axis, given the
current value and the step value.
The step value must be managed by the caller to support both
modes: grow outwards and shrink inwards.
Parameters
----------
step : float
The step value.
Returns
-------
sma : float
The new semimajor axis length.
]
if name[self].linear_growth begin[:]
variable[sma] assign[=] binary_operation[name[self].sma + name[step]]
return[name[sma]] | keyword[def] identifier[update_sma] ( identifier[self] , identifier[step] ):
literal[string]
keyword[if] identifier[self] . identifier[linear_growth] :
identifier[sma] = identifier[self] . identifier[sma] + identifier[step]
keyword[else] :
identifier[sma] = identifier[self] . identifier[sma] *( literal[int] + identifier[step] )
keyword[return] identifier[sma] | def update_sma(self, step):
"""
Calculate an updated value for the semimajor axis, given the
current value and the step value.
The step value must be managed by the caller to support both
modes: grow outwards and shrink inwards.
Parameters
----------
step : float
The step value.
Returns
-------
sma : float
The new semimajor axis length.
"""
if self.linear_growth:
sma = self.sma + step # depends on [control=['if'], data=[]]
else:
sma = self.sma * (1.0 + step)
return sma |
def smart_fit(image, fit_to_width, fit_to_height):
    """
    Proportionally fit the image into the specified width and height.

    :param image: object with a ``size`` attribute of ``(width, height)``
        (e.g. a PIL image).
    :param fit_to_width: target box width in pixels.
    :param fit_to_height: target box height in pixels.
    :return: ``(width, height)`` tuple that preserves the image's aspect
        ratio and fits inside the target box.
    """
    im_width, im_height = image.size
    out_width, out_height = fit_to_width, fit_to_height
    if im_width == 0 or im_height == 0:
        # Degenerate image: nothing to scale, report the target box as-is.
        return (fit_to_width, fit_to_height)
    w_scale = float(fit_to_width) / float(im_width)
    h_scale = float(fit_to_height) / float(im_height)
    # Scale by the more constraining axis so the whole image fits.
    if w_scale < h_scale:
        out_height = int(round(w_scale * im_height))
    else:
        out_width = int(round(h_scale * im_width))
    return out_width, out_height
return out_width, out_height | def function[smart_fit, parameter[image, fit_to_width, fit_to_height]]:
constant[
Proportionally fit the image into the specified width and height.
Return the correct width and height.
]
<ast.Tuple object at 0x7da20c794310> assign[=] name[image].size
<ast.Tuple object at 0x7da20c796080> assign[=] tuple[[<ast.Name object at 0x7da20c795c30>, <ast.Name object at 0x7da20c795990>]]
if <ast.BoolOp object at 0x7da20c795840> begin[:]
return[tuple[[<ast.Name object at 0x7da20c796a70>, <ast.Name object at 0x7da20c795540>]]]
variable[w_scale] assign[=] binary_operation[call[name[float], parameter[name[fit_to_width]]] / call[name[float], parameter[name[im_width]]]]
variable[h_scale] assign[=] binary_operation[call[name[float], parameter[name[fit_to_height]]] / call[name[float], parameter[name[im_height]]]]
if compare[name[w_scale] less[<] name[h_scale]] begin[:]
variable[scale] assign[=] binary_operation[call[name[float], parameter[name[fit_to_width]]] / call[name[float], parameter[name[im_width]]]]
variable[out_height] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[name[scale] * name[im_height]]]]]]
return[tuple[[<ast.Name object at 0x7da18fe90fa0>, <ast.Name object at 0x7da18fe91d20>]]] | keyword[def] identifier[smart_fit] ( identifier[image] , identifier[fit_to_width] , identifier[fit_to_height] ):
literal[string]
identifier[im_width] , identifier[im_height] = identifier[image] . identifier[size]
identifier[out_width] , identifier[out_height] = identifier[fit_to_width] , identifier[fit_to_height]
keyword[if] identifier[im_width] == literal[int] keyword[or] identifier[im_height] == literal[int] :
keyword[return] ( identifier[fit_to_width] , identifier[fit_to_height] )
identifier[w_scale] = identifier[float] ( identifier[fit_to_width] )/ identifier[float] ( identifier[im_width] )
identifier[h_scale] = identifier[float] ( identifier[fit_to_height] )/ identifier[float] ( identifier[im_height] )
keyword[if] identifier[w_scale] < identifier[h_scale] :
identifier[scale] = identifier[float] ( identifier[fit_to_width] )/ identifier[float] ( identifier[im_width] )
identifier[out_height] = identifier[int] ( identifier[round] ( identifier[scale] * identifier[im_height] ))
keyword[else] :
identifier[scale] = identifier[float] ( identifier[fit_to_height] )/ identifier[float] ( identifier[im_height] )
identifier[out_width] = identifier[int] ( identifier[round] ( identifier[scale] * identifier[im_width] ))
keyword[return] identifier[out_width] , identifier[out_height] | def smart_fit(image, fit_to_width, fit_to_height):
"""
Proportionally fit the image into the specified width and height.
Return the correct width and height.
"""
(im_width, im_height) = image.size
(out_width, out_height) = (fit_to_width, fit_to_height)
if im_width == 0 or im_height == 0:
return (fit_to_width, fit_to_height) # depends on [control=['if'], data=[]]
w_scale = float(fit_to_width) / float(im_width)
h_scale = float(fit_to_height) / float(im_height)
if w_scale < h_scale:
scale = float(fit_to_width) / float(im_width)
out_height = int(round(scale * im_height)) # depends on [control=['if'], data=[]]
else:
scale = float(fit_to_height) / float(im_height)
out_width = int(round(scale * im_width))
return (out_width, out_height) |
def run(self, args=None):
    """
    Execute this command with the given parsed arguments.

    :param args: Parsed arguments to run with. When falsy (normally
        ``None``), they are gathered from the argument parser; sub
        commands receive them from their parent, so in most cases the
        root command is invoked without this parameter.
    :return: The status code of the action (0 on success)
    """
    if not args:
        args = self.parse_args()
    name = getattr(args, self.sub_parser_dest_name, None)
    if not name:
        # No sub command requested: run this command's own action.
        return self.action(args) or 0
    command_cls = self.get_sub_commands()[name]
    return command_cls(name).run(args)
return self.action(args) or 0 | def function[run, parameter[self, args]]:
constant[
Runs the command passing in the parsed arguments.
:param args: The arguments to run the command with. If ``None`` the arguments
are gathered from the argument parser. This is automatically set when calling
sub commands and in most cases should not be set for the root command.
:return: The status code of the action (0 on success)
]
variable[args] assign[=] <ast.BoolOp object at 0x7da1b021d390>
variable[sub_command_name] assign[=] call[name[getattr], parameter[name[args], name[self].sub_parser_dest_name, constant[None]]]
if name[sub_command_name] begin[:]
variable[sub_commands] assign[=] call[name[self].get_sub_commands, parameter[]]
variable[cmd_cls] assign[=] call[name[sub_commands]][name[sub_command_name]]
return[call[call[name[cmd_cls], parameter[name[sub_command_name]]].run, parameter[name[args]]]]
return[<ast.BoolOp object at 0x7da1b021df00>] | keyword[def] identifier[run] ( identifier[self] , identifier[args] = keyword[None] ):
literal[string]
identifier[args] = identifier[args] keyword[or] identifier[self] . identifier[parse_args] ()
identifier[sub_command_name] = identifier[getattr] ( identifier[args] , identifier[self] . identifier[sub_parser_dest_name] , keyword[None] )
keyword[if] identifier[sub_command_name] :
identifier[sub_commands] = identifier[self] . identifier[get_sub_commands] ()
identifier[cmd_cls] = identifier[sub_commands] [ identifier[sub_command_name] ]
keyword[return] identifier[cmd_cls] ( identifier[sub_command_name] ). identifier[run] ( identifier[args] )
keyword[return] identifier[self] . identifier[action] ( identifier[args] ) keyword[or] literal[int] | def run(self, args=None):
"""
Runs the command passing in the parsed arguments.
:param args: The arguments to run the command with. If ``None`` the arguments
are gathered from the argument parser. This is automatically set when calling
sub commands and in most cases should not be set for the root command.
:return: The status code of the action (0 on success)
"""
args = args or self.parse_args()
sub_command_name = getattr(args, self.sub_parser_dest_name, None)
if sub_command_name:
sub_commands = self.get_sub_commands()
cmd_cls = sub_commands[sub_command_name]
return cmd_cls(sub_command_name).run(args) # depends on [control=['if'], data=[]]
return self.action(args) or 0 |
def get_group_index(labels, shape, sort, xnull):
    """
    For the particular label_list, gets the offsets into the hypothetical list
    representing the totally ordered cartesian product of all possible label
    combinations, *as long as* this space fits within int64 bounds;
    otherwise, though group indices identify unique combinations of
    labels, they cannot be deconstructed.
    - If `sort`, rank of returned ids preserve lexical ranks of labels.
      i.e. returned id's can be used to do lexical sort on labels;
    - If `xnull` nulls (-1 labels) are passed through.
    Parameters
    ----------
    labels: sequence of arrays
        Integers identifying levels at each location
    shape: sequence of ints same length as labels
        Number of unique levels at each location
    sort: boolean
        If the ranks of returned ids should match lexical ranks of labels
    xnull: boolean
        If true nulls are excluded. i.e. -1 values in the labels are
        passed through
    Returns
    -------
    An array of type int64 where two elements are equal if their corresponding
    labels are equal at all location.
    """
    def _int64_cut_off(shape):
        # Return the number of leading levels whose cartesian-product
        # size still fits in int64; any remaining levels are deferred
        # to the next compression round of the loop below.
        acc = 1
        for i, mul in enumerate(shape):
            acc *= int(mul)
            if not acc < _INT64_MAX:
                return i
        return len(shape)
    def maybe_lift(lab, size):
        # promote nan values (assigned -1 label in lab array)
        # so that all output values are non-negative
        return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
    labels = map(ensure_int64, labels)
    if not xnull:
        # shift every level containing -1 by +1 (and widen its size)
        # so nulls become a regular, non-negative label value
        labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
    labels = list(labels)
    shape = list(shape)
    # Iteratively process all the labels in chunks sized so less
    # than _INT64_MAX unique int ids will be required for each chunk
    while True:
        # how many levels can be done without overflow:
        nlev = _int64_cut_off(shape)
        # compute flat ids for the first `nlev` levels
        # (mixed-radix encoding: level i contributes label*stride_i)
        stride = np.prod(shape[1:nlev], dtype='i8')
        out = stride * labels[0].astype('i8', subok=False, copy=False)
        for i in range(1, nlev):
            if shape[i] == 0:
                stride = 0
            else:
                stride //= shape[i]
            out += labels[i] * stride
        if xnull:  # exclude nulls
            # a -1 at any encoded level marks the whole row as null
            mask = labels[0] == -1
            for lab in labels[1:nlev]:
                mask |= lab == -1
            out[mask] = -1
        if nlev == len(shape):  # all levels done!
            break
        # compress what has been done so far in order to avoid overflow
        # to retain lexical ranks, obs_ids should be sorted
        comp_ids, obs_ids = compress_group_index(out, sort=sort)
        # the compressed ids become a single synthetic leading level
        labels = [comp_ids] + labels[nlev:]
        shape = [len(obs_ids)] + shape[nlev:]
    return out
constant[
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
labels are equal at all location.
]
def function[_int64_cut_off, parameter[shape]]:
variable[acc] assign[=] constant[1]
for taget[tuple[[<ast.Name object at 0x7da1b26ae2c0>, <ast.Name object at 0x7da1b26ae3b0>]]] in starred[call[name[enumerate], parameter[name[shape]]]] begin[:]
<ast.AugAssign object at 0x7da1b26aef80>
if <ast.UnaryOp object at 0x7da1b26ae6e0> begin[:]
return[name[i]]
return[call[name[len], parameter[name[shape]]]]
def function[maybe_lift, parameter[lab, size]]:
return[<ast.IfExp object at 0x7da1b26ada50>]
variable[labels] assign[=] call[name[map], parameter[name[ensure_int64], name[labels]]]
if <ast.UnaryOp object at 0x7da1b26ae920> begin[:]
<ast.Tuple object at 0x7da1b26aeb00> assign[=] call[name[map], parameter[name[list], call[name[zip], parameter[<ast.Starred object at 0x7da1b26ae9e0>]]]]
variable[labels] assign[=] call[name[list], parameter[name[labels]]]
variable[shape] assign[=] call[name[list], parameter[name[shape]]]
while constant[True] begin[:]
variable[nlev] assign[=] call[name[_int64_cut_off], parameter[name[shape]]]
variable[stride] assign[=] call[name[np].prod, parameter[call[name[shape]][<ast.Slice object at 0x7da1b26ae200>]]]
variable[out] assign[=] binary_operation[name[stride] * call[call[name[labels]][constant[0]].astype, parameter[constant[i8]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[nlev]]]] begin[:]
if compare[call[name[shape]][name[i]] equal[==] constant[0]] begin[:]
variable[stride] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b26afd60>
if name[xnull] begin[:]
variable[mask] assign[=] compare[call[name[labels]][constant[0]] equal[==] <ast.UnaryOp object at 0x7da1b26ad420>]
for taget[name[lab]] in starred[call[name[labels]][<ast.Slice object at 0x7da1b26aece0>]] begin[:]
<ast.AugAssign object at 0x7da1b26ade40>
call[name[out]][name[mask]] assign[=] <ast.UnaryOp object at 0x7da1b26adf60>
if compare[name[nlev] equal[==] call[name[len], parameter[name[shape]]]] begin[:]
break
<ast.Tuple object at 0x7da1b26ae650> assign[=] call[name[compress_group_index], parameter[name[out]]]
variable[labels] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b26af130>]] + call[name[labels]][<ast.Slice object at 0x7da1b26adcc0>]]
variable[shape] assign[=] binary_operation[list[[<ast.Call object at 0x7da1b26ad2a0>]] + call[name[shape]][<ast.Slice object at 0x7da1b26ae830>]]
return[name[out]] | keyword[def] identifier[get_group_index] ( identifier[labels] , identifier[shape] , identifier[sort] , identifier[xnull] ):
literal[string]
keyword[def] identifier[_int64_cut_off] ( identifier[shape] ):
identifier[acc] = literal[int]
keyword[for] identifier[i] , identifier[mul] keyword[in] identifier[enumerate] ( identifier[shape] ):
identifier[acc] *= identifier[int] ( identifier[mul] )
keyword[if] keyword[not] identifier[acc] < identifier[_INT64_MAX] :
keyword[return] identifier[i]
keyword[return] identifier[len] ( identifier[shape] )
keyword[def] identifier[maybe_lift] ( identifier[lab] , identifier[size] ):
keyword[return] ( identifier[lab] + literal[int] , identifier[size] + literal[int] ) keyword[if] ( identifier[lab] ==- literal[int] ). identifier[any] () keyword[else] ( identifier[lab] , identifier[size] )
identifier[labels] = identifier[map] ( identifier[ensure_int64] , identifier[labels] )
keyword[if] keyword[not] identifier[xnull] :
identifier[labels] , identifier[shape] = identifier[map] ( identifier[list] , identifier[zip] (* identifier[map] ( identifier[maybe_lift] , identifier[labels] , identifier[shape] )))
identifier[labels] = identifier[list] ( identifier[labels] )
identifier[shape] = identifier[list] ( identifier[shape] )
keyword[while] keyword[True] :
identifier[nlev] = identifier[_int64_cut_off] ( identifier[shape] )
identifier[stride] = identifier[np] . identifier[prod] ( identifier[shape] [ literal[int] : identifier[nlev] ], identifier[dtype] = literal[string] )
identifier[out] = identifier[stride] * identifier[labels] [ literal[int] ]. identifier[astype] ( literal[string] , identifier[subok] = keyword[False] , identifier[copy] = keyword[False] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[nlev] ):
keyword[if] identifier[shape] [ identifier[i] ]== literal[int] :
identifier[stride] = literal[int]
keyword[else] :
identifier[stride] //= identifier[shape] [ identifier[i] ]
identifier[out] += identifier[labels] [ identifier[i] ]* identifier[stride]
keyword[if] identifier[xnull] :
identifier[mask] = identifier[labels] [ literal[int] ]==- literal[int]
keyword[for] identifier[lab] keyword[in] identifier[labels] [ literal[int] : identifier[nlev] ]:
identifier[mask] |= identifier[lab] ==- literal[int]
identifier[out] [ identifier[mask] ]=- literal[int]
keyword[if] identifier[nlev] == identifier[len] ( identifier[shape] ):
keyword[break]
identifier[comp_ids] , identifier[obs_ids] = identifier[compress_group_index] ( identifier[out] , identifier[sort] = identifier[sort] )
identifier[labels] =[ identifier[comp_ids] ]+ identifier[labels] [ identifier[nlev] :]
identifier[shape] =[ identifier[len] ( identifier[obs_ids] )]+ identifier[shape] [ identifier[nlev] :]
keyword[return] identifier[out] | def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
labels are equal at all location.
"""
def _int64_cut_off(shape):
acc = 1
for (i, mul) in enumerate(shape):
acc *= int(mul)
if not acc < _INT64_MAX:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return len(shape)
def maybe_lift(lab, size):
# promote nan values (assigned -1 label in lab array)
# so that all output values are non-negative
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(ensure_int64, labels)
if not xnull:
(labels, shape) = map(list, zip(*map(maybe_lift, labels, shape))) # depends on [control=['if'], data=[]]
labels = list(labels)
shape = list(shape)
# Iteratively process all the labels in chunks sized so less
# than _INT64_MAX unique int ids will be required for each chunk
while True:
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0 # depends on [control=['if'], data=[]]
else:
stride //= shape[i]
out += labels[i] * stride # depends on [control=['for'], data=['i']]
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1 # depends on [control=['for'], data=['lab']]
out[mask] = -1 # depends on [control=['if'], data=[]]
if nlev == len(shape): # all levels done!
break # depends on [control=['if'], data=[]]
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
(comp_ids, obs_ids) = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:] # depends on [control=['while'], data=[]]
return out |
def decrypt_ige(cipher_text, key, iv):
    """
    Decrypts the given text in 16-bytes blocks by using the
    given key and 32-bytes initialization vector.

    Prefers the native ``cryptg`` extension, then OpenSSL via
    ``libssl``, and finally falls back to a pure-Python AES-IGE
    implementation built on ``pyaes``.
    """
    if cryptg:
        # Fast path: native cryptg extension.
        return cryptg.decrypt_ige(cipher_text, key, iv)
    if libssl.decrypt_ige:
        # Second choice: OpenSSL binding.
        return libssl.decrypt_ige(cipher_text, key, iv)
    # Pure-Python AES-IGE fallback. The 32-byte IV is split in half:
    # iv1 tracks the previous ciphertext block, iv2 the previous
    # plaintext block (for the first block, the IV halves themselves).
    iv1 = iv[:len(iv) // 2]
    iv2 = iv[len(iv) // 2:]
    aes = pyaes.AES(key)
    plain_text = []
    blocks_count = len(cipher_text) // 16
    cipher_text_block = [0] * 16
    for block_index in range(blocks_count):
        # XOR the ciphertext block with the previous plaintext block
        # (iv2) before the AES decryption...
        for i in range(16):
            cipher_text_block[i] = \
                cipher_text[block_index * 16 + i] ^ iv2[i]
        plain_text_block = aes.decrypt(cipher_text_block)
        # ...and XOR the decrypted block with the previous ciphertext
        # block (iv1) afterwards — the IGE chaining mode.
        for i in range(16):
            plain_text_block[i] ^= iv1[i]
        # Advance the chain for the next block.
        iv1 = cipher_text[block_index * 16:block_index * 16 + 16]
        iv2 = plain_text_block
        plain_text.extend(plain_text_block)
    return bytes(plain_text)
return bytes(plain_text) | def function[decrypt_ige, parameter[cipher_text, key, iv]]:
constant[
Decrypts the given text in 16-bytes blocks by using the
given key and 32-bytes initialization vector.
]
if name[cryptg] begin[:]
return[call[name[cryptg].decrypt_ige, parameter[name[cipher_text], name[key], name[iv]]]]
if name[libssl].decrypt_ige begin[:]
return[call[name[libssl].decrypt_ige, parameter[name[cipher_text], name[key], name[iv]]]]
variable[iv1] assign[=] call[name[iv]][<ast.Slice object at 0x7da1b21272e0>]
variable[iv2] assign[=] call[name[iv]][<ast.Slice object at 0x7da1b21277f0>]
variable[aes] assign[=] call[name[pyaes].AES, parameter[name[key]]]
variable[plain_text] assign[=] list[[]]
variable[blocks_count] assign[=] binary_operation[call[name[len], parameter[name[cipher_text]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[16]]
variable[cipher_text_block] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b21247c0>]] * constant[16]]
for taget[name[block_index]] in starred[call[name[range], parameter[name[blocks_count]]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[16]]]] begin[:]
call[name[cipher_text_block]][name[i]] assign[=] binary_operation[call[name[cipher_text]][binary_operation[binary_operation[name[block_index] * constant[16]] + name[i]]] <ast.BitXor object at 0x7da2590d6b00> call[name[iv2]][name[i]]]
variable[plain_text_block] assign[=] call[name[aes].decrypt, parameter[name[cipher_text_block]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[16]]]] begin[:]
<ast.AugAssign object at 0x7da1b2127f70>
variable[iv1] assign[=] call[name[cipher_text]][<ast.Slice object at 0x7da1b21262f0>]
variable[iv2] assign[=] name[plain_text_block]
call[name[plain_text].extend, parameter[name[plain_text_block]]]
return[call[name[bytes], parameter[name[plain_text]]]] | keyword[def] identifier[decrypt_ige] ( identifier[cipher_text] , identifier[key] , identifier[iv] ):
literal[string]
keyword[if] identifier[cryptg] :
keyword[return] identifier[cryptg] . identifier[decrypt_ige] ( identifier[cipher_text] , identifier[key] , identifier[iv] )
keyword[if] identifier[libssl] . identifier[decrypt_ige] :
keyword[return] identifier[libssl] . identifier[decrypt_ige] ( identifier[cipher_text] , identifier[key] , identifier[iv] )
identifier[iv1] = identifier[iv] [: identifier[len] ( identifier[iv] )// literal[int] ]
identifier[iv2] = identifier[iv] [ identifier[len] ( identifier[iv] )// literal[int] :]
identifier[aes] = identifier[pyaes] . identifier[AES] ( identifier[key] )
identifier[plain_text] =[]
identifier[blocks_count] = identifier[len] ( identifier[cipher_text] )// literal[int]
identifier[cipher_text_block] =[ literal[int] ]* literal[int]
keyword[for] identifier[block_index] keyword[in] identifier[range] ( identifier[blocks_count] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[cipher_text_block] [ identifier[i] ]= identifier[cipher_text] [ identifier[block_index] * literal[int] + identifier[i] ]^ identifier[iv2] [ identifier[i] ]
identifier[plain_text_block] = identifier[aes] . identifier[decrypt] ( identifier[cipher_text_block] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[plain_text_block] [ identifier[i] ]^= identifier[iv1] [ identifier[i] ]
identifier[iv1] = identifier[cipher_text] [ identifier[block_index] * literal[int] : identifier[block_index] * literal[int] + literal[int] ]
identifier[iv2] = identifier[plain_text_block]
identifier[plain_text] . identifier[extend] ( identifier[plain_text_block] )
keyword[return] identifier[bytes] ( identifier[plain_text] ) | def decrypt_ige(cipher_text, key, iv):
"""
Decrypts the given text in 16-bytes blocks by using the
given key and 32-bytes initialization vector.
"""
if cryptg:
return cryptg.decrypt_ige(cipher_text, key, iv) # depends on [control=['if'], data=[]]
if libssl.decrypt_ige:
return libssl.decrypt_ige(cipher_text, key, iv) # depends on [control=['if'], data=[]]
iv1 = iv[:len(iv) // 2]
iv2 = iv[len(iv) // 2:]
aes = pyaes.AES(key)
plain_text = []
blocks_count = len(cipher_text) // 16
cipher_text_block = [0] * 16
for block_index in range(blocks_count):
for i in range(16):
cipher_text_block[i] = cipher_text[block_index * 16 + i] ^ iv2[i] # depends on [control=['for'], data=['i']]
plain_text_block = aes.decrypt(cipher_text_block)
for i in range(16):
plain_text_block[i] ^= iv1[i] # depends on [control=['for'], data=['i']]
iv1 = cipher_text[block_index * 16:block_index * 16 + 16]
iv2 = plain_text_block
plain_text.extend(plain_text_block) # depends on [control=['for'], data=['block_index']]
return bytes(plain_text) |
def doLayout(self, width):
    """
    Align words in previous line.

    Stores the available ``width``, derives the line's font size from the
    largest fragment font size, computes the line height by scaling with
    ``self.LINEHEIGHT``, and assigns a vertical offset ``y`` to every
    fragment on the line.

    :param width: available width for this line.
    :return: the computed line height.
    """
    # Calculate dimensions
    self.width = width
    # 0 seeds max() so an empty line still yields a valid font size.
    font_sizes = [0] + [frag.get("fontSize", 0) for frag in self]
    self.fontSize = max(font_sizes)
    # Renamed the generator variable from ``frag`` to ``size``: the
    # original shadowed the fragment loop name while actually iterating
    # plain font sizes, which was misleading.
    self.height = self.lineHeight = max(size * self.LINEHEIGHT for size in font_sizes)
    # Apply line height
    y = (self.lineHeight - self.fontSize)  # / 2
    for frag in self:
        frag["y"] = y
    return self.height
constant[
Align words in previous line.
]
name[self].width assign[=] name[width]
variable[font_sizes] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b12c9570>]] + <ast.ListComp object at 0x7da1b12cb100>]
name[self].fontSize assign[=] call[name[max], parameter[name[font_sizes]]]
name[self].height assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da18f09c940>]]
variable[y] assign[=] binary_operation[name[self].lineHeight - name[self].fontSize]
for taget[name[frag]] in starred[name[self]] begin[:]
call[name[frag]][constant[y]] assign[=] name[y]
return[name[self].height] | keyword[def] identifier[doLayout] ( identifier[self] , identifier[width] ):
literal[string]
identifier[self] . identifier[width] = identifier[width]
identifier[font_sizes] =[ literal[int] ]+[ identifier[frag] . identifier[get] ( literal[string] , literal[int] ) keyword[for] identifier[frag] keyword[in] identifier[self] ]
identifier[self] . identifier[fontSize] = identifier[max] ( identifier[font_sizes] )
identifier[self] . identifier[height] = identifier[self] . identifier[lineHeight] = identifier[max] ( identifier[frag] * identifier[self] . identifier[LINEHEIGHT] keyword[for] identifier[frag] keyword[in] identifier[font_sizes] )
identifier[y] =( identifier[self] . identifier[lineHeight] - identifier[self] . identifier[fontSize] )
keyword[for] identifier[frag] keyword[in] identifier[self] :
identifier[frag] [ literal[string] ]= identifier[y]
keyword[return] identifier[self] . identifier[height] | def doLayout(self, width):
"""
Align words in previous line.
"""
# Calculate dimensions
self.width = width
font_sizes = [0] + [frag.get('fontSize', 0) for frag in self]
self.fontSize = max(font_sizes)
self.height = self.lineHeight = max((frag * self.LINEHEIGHT for frag in font_sizes))
# Apply line height
y = self.lineHeight - self.fontSize # / 2
for frag in self:
frag['y'] = y # depends on [control=['for'], data=['frag']]
return self.height |
def updated_time_delta(self):
    """Returns the number of seconds ago the issue was updated from current time.

    ``self.updated_at`` is expected to be an ISO-8601 UTC timestamp such
    as ``2019-01-31T12:34:56Z``.

    :return: whole seconds elapsed between the update time and now,
             measured in the local timezone.
    """
    local_timezone = tzlocal()
    # Parse explicitly with %H:%M:%S: the original used %X, which is a
    # locale-dependent time representation and can fail to parse the
    # fixed "HH:MM:SS" layout of these timestamps under non-C locales.
    update_at = datetime.datetime.strptime(self.updated_at, '%Y-%m-%dT%H:%M:%SZ')
    update_at_utc = pytz.utc.localize(update_at)
    update_at_local = update_at_utc.astimezone(local_timezone)
    delta = datetime.datetime.now(local_timezone) - update_at_local
    return int(delta.total_seconds())
constant[Returns the number of seconds ago the issue was updated from current time.
]
variable[local_timezone] assign[=] call[name[tzlocal], parameter[]]
variable[update_at] assign[=] call[name[datetime].datetime.strptime, parameter[name[self].updated_at, constant[%Y-%m-%dT%XZ]]]
variable[update_at_utc] assign[=] call[name[pytz].utc.localize, parameter[name[update_at]]]
variable[update_at_local] assign[=] call[name[update_at_utc].astimezone, parameter[name[local_timezone]]]
variable[delta] assign[=] binary_operation[call[name[datetime].datetime.now, parameter[name[local_timezone]]] - name[update_at_local]]
return[call[name[int], parameter[call[name[delta].total_seconds, parameter[]]]]] | keyword[def] identifier[updated_time_delta] ( identifier[self] ):
literal[string]
identifier[local_timezone] = identifier[tzlocal] ()
identifier[update_at] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[self] . identifier[updated_at] , literal[string] )
identifier[update_at_utc] = identifier[pytz] . identifier[utc] . identifier[localize] ( identifier[update_at] )
identifier[update_at_local] = identifier[update_at_utc] . identifier[astimezone] ( identifier[local_timezone] )
identifier[delta] = identifier[datetime] . identifier[datetime] . identifier[now] ( identifier[local_timezone] )- identifier[update_at_local]
keyword[return] identifier[int] ( identifier[delta] . identifier[total_seconds] ()) | def updated_time_delta(self):
"""Returns the number of seconds ago the issue was updated from current time.
"""
local_timezone = tzlocal()
update_at = datetime.datetime.strptime(self.updated_at, '%Y-%m-%dT%XZ')
update_at_utc = pytz.utc.localize(update_at)
update_at_local = update_at_utc.astimezone(local_timezone)
delta = datetime.datetime.now(local_timezone) - update_at_local
return int(delta.total_seconds()) |
def filter_search(self, search):
    """Filter given search by the filter parameter given in request.

    :param search: ElasticSearch query object
    :raises ParseError: if any query parameter is neither a filtering
        field nor an always-allowed argument.
    """
    query_builder = QueryBuilder(self.filtering_fields, self.filtering_map, self)
    search, leftover = query_builder.build(search, self.get_query_params())

    # Arguments that are always accepted never count as unsupported.
    for allowed in self.get_always_allowed_arguments():
        leftover.pop(allowed, None)

    if leftover:
        raise ParseError(
            'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
                ', '.join(leftover),
                ', '.join(self.filtering_fields),
            )
        )

    return search
constant[Filter given search by the filter parameter given in request.
:param search: ElasticSearch query object
]
variable[builder] assign[=] call[name[QueryBuilder], parameter[name[self].filtering_fields, name[self].filtering_map, name[self]]]
<ast.Tuple object at 0x7da1b1a2d090> assign[=] call[name[builder].build, parameter[name[search], call[name[self].get_query_params, parameter[]]]]
for taget[name[argument]] in starred[call[name[self].get_always_allowed_arguments, parameter[]]] begin[:]
call[name[unmatched].pop, parameter[name[argument], constant[None]]]
if name[unmatched] begin[:]
variable[msg] assign[=] call[constant[Unsupported parameter(s): {}. Please use a combination of: {}.].format, parameter[call[constant[, ].join, parameter[name[unmatched]]], call[constant[, ].join, parameter[name[self].filtering_fields]]]]
<ast.Raise object at 0x7da1b1a2f2e0>
return[name[search]] | keyword[def] identifier[filter_search] ( identifier[self] , identifier[search] ):
literal[string]
identifier[builder] = identifier[QueryBuilder] (
identifier[self] . identifier[filtering_fields] ,
identifier[self] . identifier[filtering_map] ,
identifier[self]
)
identifier[search] , identifier[unmatched] = identifier[builder] . identifier[build] ( identifier[search] , identifier[self] . identifier[get_query_params] ())
keyword[for] identifier[argument] keyword[in] identifier[self] . identifier[get_always_allowed_arguments] ():
identifier[unmatched] . identifier[pop] ( identifier[argument] , keyword[None] )
keyword[if] identifier[unmatched] :
identifier[msg] = literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[unmatched] ),
literal[string] . identifier[join] ( identifier[self] . identifier[filtering_fields] ),
)
keyword[raise] identifier[ParseError] ( identifier[msg] )
keyword[return] identifier[search] | def filter_search(self, search):
"""Filter given search by the filter parameter given in request.
:param search: ElasticSearch query object
"""
builder = QueryBuilder(self.filtering_fields, self.filtering_map, self)
(search, unmatched) = builder.build(search, self.get_query_params())
# Ensure that no unsupported arguments were used.
for argument in self.get_always_allowed_arguments():
unmatched.pop(argument, None) # depends on [control=['for'], data=['argument']]
if unmatched:
msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(', '.join(unmatched), ', '.join(self.filtering_fields))
raise ParseError(msg) # depends on [control=['if'], data=[]]
return search |
def set_db_application_prefix(prefix, sep=None):
"""Set the global app prefix and separator."""
global _APPLICATION_PREFIX, _APPLICATION_SEP
_APPLICATION_PREFIX = prefix
if (sep is not None):
_APPLICATION_SEP = sep | def function[set_db_application_prefix, parameter[prefix, sep]]:
constant[Set the global app prefix and separator.]
<ast.Global object at 0x7da207f02620>
variable[_APPLICATION_PREFIX] assign[=] name[prefix]
if compare[name[sep] is_not constant[None]] begin[:]
variable[_APPLICATION_SEP] assign[=] name[sep] | keyword[def] identifier[set_db_application_prefix] ( identifier[prefix] , identifier[sep] = keyword[None] ):
literal[string]
keyword[global] identifier[_APPLICATION_PREFIX] , identifier[_APPLICATION_SEP]
identifier[_APPLICATION_PREFIX] = identifier[prefix]
keyword[if] ( identifier[sep] keyword[is] keyword[not] keyword[None] ):
identifier[_APPLICATION_SEP] = identifier[sep] | def set_db_application_prefix(prefix, sep=None):
"""Set the global app prefix and separator."""
global _APPLICATION_PREFIX, _APPLICATION_SEP
_APPLICATION_PREFIX = prefix
if sep is not None:
_APPLICATION_SEP = sep # depends on [control=['if'], data=['sep']] |
def format(self, record):
    """
    Apply level-specific styling to log records.

    :param record: A :class:`~logging.LogRecord` object.
    :returns: The result of :func:`logging.Formatter.format()`.

    Level-specific logic cannot be expressed in a log format string, so
    ANSI escape sequences are injected here instead: a shallow clone of
    the record gets a colorized ``msg`` and is handed to the base
    formatter, leaving the original record untouched for any other
    handlers.
    """
    style = self.nn.get(self.level_styles, record.levelname)
    # `Empty' can already be garbage collected by the time format() runs
    # during interpreter shutdown (issue 33 on GitHub:
    # https://github.com/xolox/python-coloredlogs/issues/33); in that
    # case we'd rather emit an unstyled message than raise.
    if style and Empty is not None:
        # Clone the record by hand instead of copy.copy(): the generic
        # copy machinery was reported to deadlock (issue 29 on GitHub:
        # https://github.com/xolox/python-coloredlogs/issues/29).
        # Instantiating `Empty' and swapping its class sidesteps any
        # LogRecord constructor side effects.
        clone = Empty()
        if self.log_record_factory is not None:
            clone.__class__ = self.log_record_factory()
        else:
            clone.__class__ = logging.LogRecord
        clone.__dict__.update(record.__dict__)
        clone.msg = ansi_wrap(coerce_string(record.msg), **style)
        record = clone
    # Delegate the remaining formatting to the base formatter.
    return logging.Formatter.format(self, record)
constant[
Apply level-specific styling to log records.
:param record: A :class:`~logging.LogRecord` object.
:returns: The result of :func:`logging.Formatter.format()`.
This method injects ANSI escape sequences that are specific to the
level of each log record (because such logic cannot be expressed in the
syntax of a log format string). It works by making a copy of the log
record, changing the `msg` field inside the copy and passing the copy
into the :func:`~logging.Formatter.format()` method of the base
class.
]
variable[style] assign[=] call[name[self].nn.get, parameter[name[self].level_styles, name[record].levelname]]
if <ast.BoolOp object at 0x7da1b069b190> begin[:]
variable[copy] assign[=] call[name[Empty], parameter[]]
name[copy].__class__ assign[=] <ast.IfExp object at 0x7da1b069bc40>
call[name[copy].__dict__.update, parameter[name[record].__dict__]]
name[copy].msg assign[=] call[name[ansi_wrap], parameter[call[name[coerce_string], parameter[name[record].msg]]]]
variable[record] assign[=] name[copy]
return[call[name[logging].Formatter.format, parameter[name[self], name[record]]]] | keyword[def] identifier[format] ( identifier[self] , identifier[record] ):
literal[string]
identifier[style] = identifier[self] . identifier[nn] . identifier[get] ( identifier[self] . identifier[level_styles] , identifier[record] . identifier[levelname] )
keyword[if] identifier[style] keyword[and] identifier[Empty] keyword[is] keyword[not] keyword[None] :
identifier[copy] = identifier[Empty] ()
identifier[copy] . identifier[__class__] =(
identifier[self] . identifier[log_record_factory] ()
keyword[if] identifier[self] . identifier[log_record_factory] keyword[is] keyword[not] keyword[None]
keyword[else] identifier[logging] . identifier[LogRecord]
)
identifier[copy] . identifier[__dict__] . identifier[update] ( identifier[record] . identifier[__dict__] )
identifier[copy] . identifier[msg] = identifier[ansi_wrap] ( identifier[coerce_string] ( identifier[record] . identifier[msg] ),** identifier[style] )
identifier[record] = identifier[copy]
keyword[return] identifier[logging] . identifier[Formatter] . identifier[format] ( identifier[self] , identifier[record] ) | def format(self, record):
"""
Apply level-specific styling to log records.
:param record: A :class:`~logging.LogRecord` object.
:returns: The result of :func:`logging.Formatter.format()`.
This method injects ANSI escape sequences that are specific to the
level of each log record (because such logic cannot be expressed in the
syntax of a log format string). It works by making a copy of the log
record, changing the `msg` field inside the copy and passing the copy
into the :func:`~logging.Formatter.format()` method of the base
class.
"""
style = self.nn.get(self.level_styles, record.levelname)
# After the introduction of the `Empty' class it was reported in issue
# 33 that format() can be called when `Empty' has already been garbage
# collected. This explains the (otherwise rather out of place) `Empty
# is not None' check in the following `if' statement. The reasoning
# here is that it's much better to log a message without formatting
# then to raise an exception ;-).
#
# For more details refer to issue 33 on GitHub:
# https://github.com/xolox/python-coloredlogs/issues/33
if style and Empty is not None:
# Due to the way that Python's logging module is structured and
# documented the only (IMHO) clean way to customize its behavior is
# to change incoming LogRecord objects before they get to the base
# formatter. However we don't want to break other formatters and
# handlers, so we copy the log record.
#
# In the past this used copy.copy() but as reported in issue 29
# (which is reproducible) this can cause deadlocks. The following
# Python voodoo is intended to accomplish the same thing as
# copy.copy() without all of the generalization and overhead that
# we don't need for our -very limited- use case.
#
# For more details refer to issue 29 on GitHub:
# https://github.com/xolox/python-coloredlogs/issues/29
copy = Empty()
copy.__class__ = self.log_record_factory() if self.log_record_factory is not None else logging.LogRecord
copy.__dict__.update(record.__dict__)
copy.msg = ansi_wrap(coerce_string(record.msg), **style)
record = copy # depends on [control=['if'], data=[]]
# Delegate the remaining formatting to the base formatter.
return logging.Formatter.format(self, record) |
def hide_tooltip_if_necessary(self, key):
    """Hide calltip when necessary"""
    try:
        calltip_char = self.get_character(self.calltip_position)
        cursor_before = self.is_cursor_before(self.calltip_position,
                                              char_offset=1)
        dismiss_key = key in (Qt.Key_ParenRight, Qt.Key_Period, Qt.Key_Tab)
        # The calltip stays up only while the anchor character is still a
        # '?' or '(', the cursor has not moved before it, and no
        # dismissing key was pressed.
        keep_visible = (calltip_char in ('?', '(')
                        and not cursor_before
                        and not dismiss_key)
        if not keep_visible:
            QToolTip.hideText()
    except (IndexError, TypeError):
        # Position became invalid (e.g. text changed under us); just hide.
        QToolTip.hideText()
QToolTip.hideText() | def function[hide_tooltip_if_necessary, parameter[self, key]]:
constant[Hide calltip when necessary]
<ast.Try object at 0x7da18bcc9b70> | keyword[def] identifier[hide_tooltip_if_necessary] ( identifier[self] , identifier[key] ):
literal[string]
keyword[try] :
identifier[calltip_char] = identifier[self] . identifier[get_character] ( identifier[self] . identifier[calltip_position] )
identifier[before] = identifier[self] . identifier[is_cursor_before] ( identifier[self] . identifier[calltip_position] ,
identifier[char_offset] = literal[int] )
identifier[other] = identifier[key] keyword[in] ( identifier[Qt] . identifier[Key_ParenRight] , identifier[Qt] . identifier[Key_Period] , identifier[Qt] . identifier[Key_Tab] )
keyword[if] identifier[calltip_char] keyword[not] keyword[in] ( literal[string] , literal[string] ) keyword[or] identifier[before] keyword[or] identifier[other] :
identifier[QToolTip] . identifier[hideText] ()
keyword[except] ( identifier[IndexError] , identifier[TypeError] ):
identifier[QToolTip] . identifier[hideText] () | def hide_tooltip_if_necessary(self, key):
"""Hide calltip when necessary"""
try:
calltip_char = self.get_character(self.calltip_position)
before = self.is_cursor_before(self.calltip_position, char_offset=1)
other = key in (Qt.Key_ParenRight, Qt.Key_Period, Qt.Key_Tab)
if calltip_char not in ('?', '(') or before or other:
QToolTip.hideText() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (IndexError, TypeError):
QToolTip.hideText() # depends on [control=['except'], data=[]] |
def set_ctype(self, ctype, orig_ctype=None):
    """
    Set the selected content type. Will not override the value of
    the content type if that has already been determined.

    :param ctype: The content type string to set.
    :param orig_ctype: The original content type, as found in the
                       configuration.
    """
    if self.ctype is not None:
        # Content type already determined; keep the first selection.
        return
    self.ctype = ctype
    self.orig_ctype = orig_ctype
constant[
Set the selected content type. Will not override the value of
the content type if that has already been determined.
:param ctype: The content type string to set.
:param orig_ctype: The original content type, as found in the
configuration.
]
if compare[name[self].ctype is constant[None]] begin[:]
name[self].ctype assign[=] name[ctype]
name[self].orig_ctype assign[=] name[orig_ctype] | keyword[def] identifier[set_ctype] ( identifier[self] , identifier[ctype] , identifier[orig_ctype] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[ctype] keyword[is] keyword[None] :
identifier[self] . identifier[ctype] = identifier[ctype]
identifier[self] . identifier[orig_ctype] = identifier[orig_ctype] | def set_ctype(self, ctype, orig_ctype=None):
"""
Set the selected content type. Will not override the value of
the content type if that has already been determined.
:param ctype: The content type string to set.
:param orig_ctype: The original content type, as found in the
configuration.
"""
if self.ctype is None:
self.ctype = ctype
self.orig_ctype = orig_ctype # depends on [control=['if'], data=[]] |
def commit(self) -> ResponseCommit:
    """Return the current encode state value to tendermint.

    :returns: a :class:`ResponseCommit` whose ``data`` is ``self.txCount``
        packed as an 8-byte big-endian unsigned integer.
    """
    # Renamed from ``hash``: the original local shadowed the ``hash``
    # builtin.
    state_hash = struct.pack('>Q', self.txCount)
    return ResponseCommit(data=state_hash)
constant[Return the current encode state value to tendermint]
variable[hash] assign[=] call[name[struct].pack, parameter[constant[>Q], name[self].txCount]]
return[call[name[ResponseCommit], parameter[]]] | keyword[def] identifier[commit] ( identifier[self] )-> identifier[ResponseCommit] :
literal[string]
identifier[hash] = identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[txCount] )
keyword[return] identifier[ResponseCommit] ( identifier[data] = identifier[hash] ) | def commit(self) -> ResponseCommit:
"""Return the current encode state value to tendermint"""
hash = struct.pack('>Q', self.txCount)
return ResponseCommit(data=hash) |
def item_afdeling_adapter(obj, request):
    """
    Adapter for rendering an object of
    :class: `crabpy.gateway.capakey.Afdeling` to json.
    """
    gemeente = obj.gemeente
    rendered = {
        'id': obj.id,
        'naam': obj.naam,
        'gemeente': {
            'id': gemeente.id,
            'naam': gemeente.naam,
        },
        'centroid': obj.centroid,
        'bounding_box': obj.bounding_box,
    }
    return rendered
constant[
Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json.
]
return[dictionary[[<ast.Constant object at 0x7da1b0949fc0>, <ast.Constant object at 0x7da1b09485b0>, <ast.Constant object at 0x7da1b0a66ce0>, <ast.Constant object at 0x7da1b0a65150>, <ast.Constant object at 0x7da1b0a66560>], [<ast.Attribute object at 0x7da1b0a65420>, <ast.Attribute object at 0x7da1b0a64d00>, <ast.Dict object at 0x7da1b0a66ad0>, <ast.Attribute object at 0x7da1b0a64dc0>, <ast.Attribute object at 0x7da1b0a64be0>]]] | keyword[def] identifier[item_afdeling_adapter] ( identifier[obj] , identifier[request] ):
literal[string]
keyword[return] {
literal[string] : identifier[obj] . identifier[id] ,
literal[string] : identifier[obj] . identifier[naam] ,
literal[string] :{
literal[string] : identifier[obj] . identifier[gemeente] . identifier[id] ,
literal[string] : identifier[obj] . identifier[gemeente] . identifier[naam]
},
literal[string] : identifier[obj] . identifier[centroid] ,
literal[string] : identifier[obj] . identifier[bounding_box]
} | def item_afdeling_adapter(obj, request):
"""
Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json.
"""
return {'id': obj.id, 'naam': obj.naam, 'gemeente': {'id': obj.gemeente.id, 'naam': obj.gemeente.naam}, 'centroid': obj.centroid, 'bounding_box': obj.bounding_box} |
def make_thumbnail_name(image_name, extension):
    """Return name of the downloaded thumbnail, based on the extension."""
    base, _ext = os.path.splitext(image_name)
    return '{}.{}'.format(base, clean_extension(extension))
constant[Return name of the downloaded thumbnail, based on the extension.]
<ast.Tuple object at 0x7da18c4cd240> assign[=] call[name[os].path.splitext, parameter[name[image_name]]]
return[binary_operation[binary_operation[name[file_name] + constant[.]] + call[name[clean_extension], parameter[name[extension]]]]] | keyword[def] identifier[make_thumbnail_name] ( identifier[image_name] , identifier[extension] ):
literal[string]
identifier[file_name] , identifier[_] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[image_name] )
keyword[return] identifier[file_name] + literal[string] + identifier[clean_extension] ( identifier[extension] ) | def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
(file_name, _) = os.path.splitext(image_name)
return file_name + '.' + clean_extension(extension) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.